/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

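/* Usage sketch (illustrative): device feature bits are tested and updated
 * through these wrappers rather than with open-coded bitops, e.g.
 *
 *      if (!tg3_flag(tp, ENABLE_APE))
 *              return 0;
 *
 * as done in tg3_ape_lock() later in this file.
 */
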
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
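/* Example (illustrative): with TG3_TX_RING_SIZE == 512, NEXT_TX(511)
 * wraps to 0 via the mask above, so ring-index arithmetic never needs a
 * hardware divide or modulo.
 */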

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
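
/* Example (illustrative): tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40) goes
 * through _tw32_flush(): it posts the write, reads the register back to
 * flush it when posted writes are safe, and then waits 40 usec so the
 * hardware has time to settle (see tg3_switch_clocks() for real callers).
 */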

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
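
/* Example usage (illustrative sketch, assuming the NIC_SRAM_FW_CMD_MBOX
 * and FWCMD_NICDRV_ALIVE constants from tg3.h): the driver/firmware
 * mailbox words live in NIC SRAM and are accessed through the memory-window
 * helpers above, e.g.
 *
 *      tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
 */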

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
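
/* Example (illustrative): lock/unlock calls bracket any access to a
 * resource shared with the APE firmware, as tg3_ape_event_lock() below
 * does for APE shared memory:
 *
 *      if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *              ... access APE shared memory ...
 *              tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *      }
 */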

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

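/* Writing 1 to an interrupt mailbox (as above) masks that vector;
 * writing the (last_tag << 24) value with the low bit clear re-arms it,
 * as done below.
 */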
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
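
/* Example (illustrative): polling link status through the MI interface,
 *
 *      u32 bmsr;
 *
 *      if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *              ... link is up ...
 */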

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
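
/* Clause-45 MMD registers have no direct clause-22 address; the two
 * helpers above reach them indirectly by selecting the device and
 * register through MII_TG3_MMD_CTRL and MII_TG3_MMD_ADDRESS, then moving
 * the data with the no-post-increment control setting.
 */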
1275
1276 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1277 {
1278         int err;
1279
1280         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281         if (!err)
1282                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284         return err;
1285 }
1286
1287 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1288 {
1289         int err;
1290
1291         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1292         if (!err)
1293                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1294
1295         return err;
1296 }
1297
1298 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1299 {
1300         int err;
1301
1302         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1303                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1304                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1305         if (!err)
1306                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1307
1308         return err;
1309 }
1310
1311 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1312 {
1313         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1314                 set |= MII_TG3_AUXCTL_MISC_WREN;
1315
1316         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1317 }
1318
1319 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1320 {
1321         u32 val;
1322         int err;
1323
1324         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1325
1326         if (err)
1327                 return err;
1328
1329         if (enable)
1330                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1331         else
1332                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1333
1334         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1335                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1336
1337         return err;
1338 }
1339
1340 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1341 {
1342         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1343                             reg | val | MII_TG3_MISC_SHDW_WREN);
1344 }
1345
1346 static int tg3_bmcr_reset(struct tg3 *tp)
1347 {
1348         u32 phy_control;
1349         int limit, err;
1350
1351         /* OK, reset it, and poll the BMCR_RESET bit until it
1352          * clears or we time out.
1353          */
1354         phy_control = BMCR_RESET;
1355         err = tg3_writephy(tp, MII_BMCR, phy_control);
1356         if (err != 0)
1357                 return -EBUSY;
1358
1359         limit = 5000;
1360         while (limit--) {
1361                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1362                 if (err != 0)
1363                         return -EBUSY;
1364
1365                 if ((phy_control & BMCR_RESET) == 0) {
1366                         udelay(40);
1367                         break;
1368                 }
1369                 udelay(10);
1370         }
1371         if (limit < 0)
1372                 return -EBUSY;
1373
1374         return 0;
1375 }
1376
1377 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1378 {
1379         struct tg3 *tp = bp->priv;
1380         u32 val;
1381
1382         spin_lock_bh(&tp->lock);
1383
1384         if (__tg3_readphy(tp, mii_id, reg, &val))
1385                 val = -EIO;
1386
1387         spin_unlock_bh(&tp->lock);
1388
1389         return val;
1390 }
1391
1392 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1393 {
1394         struct tg3 *tp = bp->priv;
1395         u32 ret = 0;
1396
1397         spin_lock_bh(&tp->lock);
1398
1399         if (__tg3_writephy(tp, mii_id, reg, val))
1400                 ret = -EIO;
1401
1402         spin_unlock_bh(&tp->lock);
1403
1404         return ret;
1405 }
1406
1407 static void tg3_mdio_config_5785(struct tg3 *tp)
1408 {
1409         u32 val;
1410         struct phy_device *phydev;
1411
1412         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1413         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1414         case PHY_ID_BCM50610:
1415         case PHY_ID_BCM50610M:
1416                 val = MAC_PHYCFG2_50610_LED_MODES;
1417                 break;
1418         case PHY_ID_BCMAC131:
1419                 val = MAC_PHYCFG2_AC131_LED_MODES;
1420                 break;
1421         case PHY_ID_RTL8211C:
1422                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1423                 break;
1424         case PHY_ID_RTL8201E:
1425                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1426                 break;
1427         default:
1428                 return;
1429         }
1430
1431         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1432                 tw32(MAC_PHYCFG2, val);
1433
1434                 val = tr32(MAC_PHYCFG1);
1435                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1436                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1437                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1438                 tw32(MAC_PHYCFG1, val);
1439
1440                 return;
1441         }
1442
1443         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1444                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1445                        MAC_PHYCFG2_FMODE_MASK_MASK |
1446                        MAC_PHYCFG2_GMODE_MASK_MASK |
1447                        MAC_PHYCFG2_ACT_MASK_MASK   |
1448                        MAC_PHYCFG2_QUAL_MASK_MASK |
1449                        MAC_PHYCFG2_INBAND_ENABLE;
1450
1451         tw32(MAC_PHYCFG2, val);
1452
1453         val = tr32(MAC_PHYCFG1);
1454         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1455                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1456         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1457                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1458                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1459                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1460                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1461         }
1462         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1463                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1464         tw32(MAC_PHYCFG1, val);
1465
1466         val = tr32(MAC_EXT_RGMII_MODE);
1467         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1468                  MAC_RGMII_MODE_RX_QUALITY |
1469                  MAC_RGMII_MODE_RX_ACTIVITY |
1470                  MAC_RGMII_MODE_RX_ENG_DET |
1471                  MAC_RGMII_MODE_TX_ENABLE |
1472                  MAC_RGMII_MODE_TX_LOWPWR |
1473                  MAC_RGMII_MODE_TX_RESET);
1474         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1475                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1476                         val |= MAC_RGMII_MODE_RX_INT_B |
1477                                MAC_RGMII_MODE_RX_QUALITY |
1478                                MAC_RGMII_MODE_RX_ACTIVITY |
1479                                MAC_RGMII_MODE_RX_ENG_DET;
1480                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1481                         val |= MAC_RGMII_MODE_TX_ENABLE |
1482                                MAC_RGMII_MODE_TX_LOWPWR |
1483                                MAC_RGMII_MODE_TX_RESET;
1484         }
1485         tw32(MAC_EXT_RGMII_MODE, val);
1486 }
1487
1488 static void tg3_mdio_start(struct tg3 *tp)
1489 {
1490         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1491         tw32_f(MAC_MI_MODE, tp->mi_mode);
1492         udelay(80);
1493
1494         if (tg3_flag(tp, MDIOBUS_INITED) &&
1495             tg3_asic_rev(tp) == ASIC_REV_5785)
1496                 tg3_mdio_config_5785(tp);
1497 }
1498
1499 static int tg3_mdio_init(struct tg3 *tp)
1500 {
1501         int i;
1502         u32 reg;
1503         struct phy_device *phydev;
1504
1505         if (tg3_flag(tp, 5717_PLUS)) {
1506                 u32 is_serdes;
1507
1508                 tp->phy_addr = tp->pci_fn + 1;
1509
1510                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1511                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1512                 else
1513                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1514                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1515                 if (is_serdes)
1516                         tp->phy_addr += 7;
1517         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1518                 int addr;
1519
1520                 addr = ssb_gige_get_phyaddr(tp->pdev);
1521                 if (addr < 0)
1522                         return addr;
1523                 tp->phy_addr = addr;
1524         } else
1525                 tp->phy_addr = TG3_PHY_MII_ADDR;
1526
1527         tg3_mdio_start(tp);
1528
1529         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1530                 return 0;
1531
1532         tp->mdio_bus = mdiobus_alloc();
1533         if (tp->mdio_bus == NULL)
1534                 return -ENOMEM;
1535
1536         tp->mdio_bus->name     = "tg3 mdio bus";
1537         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1538                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1539         tp->mdio_bus->priv     = tp;
1540         tp->mdio_bus->parent   = &tp->pdev->dev;
1541         tp->mdio_bus->read     = &tg3_mdio_read;
1542         tp->mdio_bus->write    = &tg3_mdio_write;
1543         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1544
1545         /* The bus registration will look for all the PHYs on the mdio bus.
1546          * Unfortunately, it does not ensure the PHY is powered up before
1547          * accessing the PHY ID registers.  A chip reset is the
1548          * quickest way to bring the device back to an operational state.
1549          */
1550         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1551                 tg3_bmcr_reset(tp);
1552
1553         i = mdiobus_register(tp->mdio_bus);
1554         if (i) {
1555                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1556                 mdiobus_free(tp->mdio_bus);
1557                 return i;
1558         }
1559
1560         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1561
1562         if (!phydev || !phydev->drv) {
1563                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1564                 mdiobus_unregister(tp->mdio_bus);
1565                 mdiobus_free(tp->mdio_bus);
1566                 return -ENODEV;
1567         }
1568
1569         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1570         case PHY_ID_BCM57780:
1571                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1572                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1573                 break;
1574         case PHY_ID_BCM50610:
1575         case PHY_ID_BCM50610M:
1576                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1577                                      PHY_BRCM_RX_REFCLK_UNUSED |
1578                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1579                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1581                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1582                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1583                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1584                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1585                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1586                 /* fallthru */
1587         case PHY_ID_RTL8211C:
1588                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1589                 break;
1590         case PHY_ID_RTL8201E:
1591         case PHY_ID_BCMAC131:
1592                 phydev->interface = PHY_INTERFACE_MODE_MII;
1593                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1594                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1595                 break;
1596         }
1597
1598         tg3_flag_set(tp, MDIOBUS_INITED);
1599
1600         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1601                 tg3_mdio_config_5785(tp);
1602
1603         return 0;
1604 }
1605
1606 static void tg3_mdio_fini(struct tg3 *tp)
1607 {
1608         if (tg3_flag(tp, MDIOBUS_INITED)) {
1609                 tg3_flag_clear(tp, MDIOBUS_INITED);
1610                 mdiobus_unregister(tp->mdio_bus);
1611                 mdiobus_free(tp->mdio_bus);
1612         }
1613 }
1614
1615 /* tp->lock is held. */
1616 static inline void tg3_generate_fw_event(struct tg3 *tp)
1617 {
1618         u32 val;
1619
1620         val = tr32(GRC_RX_CPU_EVENT);
1621         val |= GRC_RX_CPU_DRIVER_EVENT;
1622         tw32_f(GRC_RX_CPU_EVENT, val);
1623
1624         tp->last_event_jiffies = jiffies;
1625 }
1626
1627 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1628
1629 /* tp->lock is held. */
1630 static void tg3_wait_for_event_ack(struct tg3 *tp)
1631 {
1632         int i;
1633         unsigned int delay_cnt;
1634         long time_remain;
1635
1636         /* If enough time has passed, no wait is necessary. */
1637         time_remain = (long)(tp->last_event_jiffies + 1 +
1638                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1639                       (long)jiffies;
1640         if (time_remain < 0)
1641                 return;
1642
1643         /* Check if we can shorten the wait time. */
1644         delay_cnt = jiffies_to_usecs(time_remain);
1645         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1646                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1647         delay_cnt = (delay_cnt >> 3) + 1;
1648
1649         for (i = 0; i < delay_cnt; i++) {
1650                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1651                         break;
1652                 if (pci_channel_offline(tp->pdev))
1653                         break;
1654
1655                 udelay(8);
1656         }
1657 }
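/*
 * Sketch: the wrap-safe deadline arithmetic used above.  Subtracting
 * two tick counters and reading the result as signed keeps the
 * comparison correct across counter overflow, which is why
 * time_remain is computed as a signed difference of jiffies values.
 * Standalone model; the 32-bit tick width is an assumption of the
 * sketch (the kernel uses unsigned long jiffies).
 */
#include <stdint.h>
#include <stdio.h>

static int deadline_passed(uint32_t now, uint32_t deadline)
{
        /* Negative signed difference means the deadline is still ahead. */
        return (int32_t)(now - deadline) >= 0;
}

int main(void)
{
        uint32_t deadline = 0xfffffff0u + 0x20;         /* wraps past zero */

        printf("%d\n", deadline_passed(0xfffffff5u, deadline)); /* 0 */
        printf("%d\n", deadline_passed(0x00000015u, deadline)); /* 1 */
        return 0;
}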
1658
1659 /* tp->lock is held. */
1660 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1661 {
1662         u32 reg, val;
1663
1664         val = 0;
1665         if (!tg3_readphy(tp, MII_BMCR, &reg))
1666                 val = reg << 16;
1667         if (!tg3_readphy(tp, MII_BMSR, &reg))
1668                 val |= (reg & 0xffff);
1669         *data++ = val;
1670
1671         val = 0;
1672         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1673                 val = reg << 16;
1674         if (!tg3_readphy(tp, MII_LPA, &reg))
1675                 val |= (reg & 0xffff);
1676         *data++ = val;
1677
1678         val = 0;
1679         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1680                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1681                         val = reg << 16;
1682                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1683                         val |= (reg & 0xffff);
1684         }
1685         *data++ = val;
1686
1687         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1688                 val = reg << 16;
1689         else
1690                 val = 0;
1691         *data++ = val;
1692 }
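/*
 * Sketch: the word packing tg3_phy_gather_ump_data() performs -- each
 * mailbox word carries a pair of 16-bit MII registers, first register
 * of the pair in the high half, second in the low half, with zeros
 * left behind on a failed read.  The register values below are
 * invented.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_mii_pair(uint16_t hi_reg, uint16_t lo_reg)
{
        return ((uint32_t)hi_reg << 16) | lo_reg;
}

int main(void)
{
        uint32_t word = pack_mii_pair(0x1140, 0x796d);

        printf("0x%08x\n", word);       /* 0x1140796d */
        return 0;
}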
1693
1694 /* tp->lock is held. */
1695 static void tg3_ump_link_report(struct tg3 *tp)
1696 {
1697         u32 data[4];
1698
1699         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1700                 return;
1701
1702         tg3_phy_gather_ump_data(tp, data);
1703
1704         tg3_wait_for_event_ack(tp);
1705
1706         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1707         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1708         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1709         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1710         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1711         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1712
1713         tg3_generate_fw_event(tp);
1714 }
1715
1716 /* tp->lock is held. */
1717 static void tg3_stop_fw(struct tg3 *tp)
1718 {
1719         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1720                 /* Wait for RX cpu to ACK the previous event. */
1721                 tg3_wait_for_event_ack(tp);
1722
1723                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1724
1725                 tg3_generate_fw_event(tp);
1726
1727                 /* Wait for RX cpu to ACK this event. */
1728                 tg3_wait_for_event_ack(tp);
1729         }
1730 }
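/*
 * Sketch: the driver-to-firmware command sequence that tg3_stop_fw()
 * and tg3_ump_link_report() above follow -- drain any previous event,
 * write the command mailbox, raise the CPU event bit, then wait for
 * the firmware to ACK by clearing it.  Everything here
 * (event_pending, cmd_mbox, the command value) is an invented
 * stand-in for the sketch.
 */
#include <stdio.h>

static int event_pending;       /* models GRC_RX_CPU_DRIVER_EVENT */
static unsigned int cmd_mbox;   /* models NIC_SRAM_FW_CMD_MBOX */

static void wait_for_ack(void)
{
        /* A real driver polls this with a bounded udelay() loop. */
        event_pending = 0;      /* pretend the firmware consumed it */
}

static void send_fw_command(unsigned int cmd)
{
        wait_for_ack();         /* previous event must be ACKed first */
        cmd_mbox = cmd;
        event_pending = 1;      /* stands in for tg3_generate_fw_event() */
        wait_for_ack();         /* wait for this event's ACK */
}

int main(void)
{
        send_fw_command(0x5046);        /* invented command value */
        printf("mbox=0x%x pending=%d\n", cmd_mbox, event_pending);
        return 0;
}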
1731
1732 /* tp->lock is held. */
1733 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1734 {
1735         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1736                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1737
1738         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1739                 switch (kind) {
1740                 case RESET_KIND_INIT:
1741                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1742                                       DRV_STATE_START);
1743                         break;
1744
1745                 case RESET_KIND_SHUTDOWN:
1746                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1747                                       DRV_STATE_UNLOAD);
1748                         break;
1749
1750                 case RESET_KIND_SUSPEND:
1751                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1752                                       DRV_STATE_SUSPEND);
1753                         break;
1754
1755                 default:
1756                         break;
1757                 }
1758         }
1759 }
1760
1761 /* tp->lock is held. */
1762 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1763 {
1764         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1765                 switch (kind) {
1766                 case RESET_KIND_INIT:
1767                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1768                                       DRV_STATE_START_DONE);
1769                         break;
1770
1771                 case RESET_KIND_SHUTDOWN:
1772                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1773                                       DRV_STATE_UNLOAD_DONE);
1774                         break;
1775
1776                 default:
1777                         break;
1778                 }
1779         }
1780 }
1781
1782 /* tp->lock is held. */
1783 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1784 {
1785         if (tg3_flag(tp, ENABLE_ASF)) {
1786                 switch (kind) {
1787                 case RESET_KIND_INIT:
1788                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789                                       DRV_STATE_START);
1790                         break;
1791
1792                 case RESET_KIND_SHUTDOWN:
1793                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1794                                       DRV_STATE_UNLOAD);
1795                         break;
1796
1797                 case RESET_KIND_SUSPEND:
1798                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1799                                       DRV_STATE_SUSPEND);
1800                         break;
1801
1802                 default:
1803                         break;
1804                 }
1805         }
1806 }
1807
1808 static int tg3_poll_fw(struct tg3 *tp)
1809 {
1810         int i;
1811         u32 val;
1812
1813         if (tg3_flag(tp, NO_FWARE_REPORTED))
1814                 return 0;
1815
1816         if (tg3_flag(tp, IS_SSB_CORE)) {
1817                 /* We don't use firmware. */
1818                 return 0;
1819         }
1820
1821         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1822                 /* Wait up to 20ms for init done. */
1823                 for (i = 0; i < 200; i++) {
1824                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1825                                 return 0;
1826                         if (pci_channel_offline(tp->pdev))
1827                                 return -ENODEV;
1828
1829                         udelay(100);
1830                 }
1831                 return -ENODEV;
1832         }
1833
1834         /* Wait for firmware initialization to complete. */
1835         for (i = 0; i < 100000; i++) {
1836                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1837                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1838                         break;
1839                 if (pci_channel_offline(tp->pdev)) {
1840                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1841                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1842                                 netdev_info(tp->dev, "No firmware running\n");
1843                         }
1844
1845                         break;
1846                 }
1847
1848                 udelay(10);
1849         }
1850
1851         /* Chip might not be fitted with firmware.  Some Sun onboard
1852          * parts are configured like that.  So don't signal the timeout
1853          * of the above loop as an error, but do report the lack of
1854          * running firmware once.
1855          */
1856         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1857                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1858
1859                 netdev_info(tp->dev, "No firmware running\n");
1860         }
1861
1862         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1863                 /* The 57765 A0 needs a little more
1864                  * time to do some important work.
1865                  */
1866                 mdelay(10);
1867         }
1868
1869         return 0;
1870 }
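/*
 * Sketch: the pre/post-reset handshake tg3_poll_fw() completes -- the
 * driver writes a magic value to the firmware mailbox before reset,
 * then polls, bounded, for the firmware to write back its one's
 * complement.  MAGIC below is illustrative only (the real constant is
 * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in tg3.h), and poll_mbox() is not
 * driver API.
 */
#include <stdint.h>
#include <stdio.h>

#define MAGIC 0x4b657654u       /* illustrative value only */

static uint32_t mbox = MAGIC;   /* the driver's pre-reset write */

static int poll_mbox(int budget)
{
        int i;

        for (i = 0; i < budget; i++) {
                if (mbox == ~MAGIC)
                        return 0;       /* handshake complete */
                /* a real loop would udelay() between reads */
        }
        return -1;      /* timeout: possibly just no firmware fitted */
}

int main(void)
{
        mbox = ~MAGIC;  /* simulate the firmware's reply */
        printf("%d\n", poll_mbox(100000));      /* 0 */
        return 0;
}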
1871
1872 static void tg3_link_report(struct tg3 *tp)
1873 {
1874         if (!netif_carrier_ok(tp->dev)) {
1875                 netif_info(tp, link, tp->dev, "Link is down\n");
1876                 tg3_ump_link_report(tp);
1877         } else if (netif_msg_link(tp)) {
1878                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1879                             (tp->link_config.active_speed == SPEED_1000 ?
1880                              1000 :
1881                              (tp->link_config.active_speed == SPEED_100 ?
1882                               100 : 10)),
1883                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1884                              "full" : "half"));
1885
1886                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1887                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1888                             "on" : "off",
1889                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1890                             "on" : "off");
1891
1892                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1893                         netdev_info(tp->dev, "EEE is %s\n",
1894                                     tp->setlpicnt ? "enabled" : "disabled");
1895
1896                 tg3_ump_link_report(tp);
1897         }
1898
1899         tp->link_up = netif_carrier_ok(tp->dev);
1900 }
1901
1902 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1903 {
1904         u32 flowctrl = 0;
1905
1906         if (adv & ADVERTISE_PAUSE_CAP) {
1907                 flowctrl |= FLOW_CTRL_RX;
1908                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1909                         flowctrl |= FLOW_CTRL_TX;
1910         } else if (adv & ADVERTISE_PAUSE_ASYM)
1911                 flowctrl |= FLOW_CTRL_TX;
1912
1913         return flowctrl;
1914 }
1915
1916 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1917 {
1918         u16 miireg;
1919
1920         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1921                 miireg = ADVERTISE_1000XPAUSE;
1922         else if (flow_ctrl & FLOW_CTRL_TX)
1923                 miireg = ADVERTISE_1000XPSE_ASYM;
1924         else if (flow_ctrl & FLOW_CTRL_RX)
1925                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1926         else
1927                 miireg = 0;
1928
1929         return miireg;
1930 }
1931
1932 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1933 {
1934         u32 flowctrl = 0;
1935
1936         if (adv & ADVERTISE_1000XPAUSE) {
1937                 flowctrl |= FLOW_CTRL_RX;
1938                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1939                         flowctrl |= FLOW_CTRL_TX;
1940         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1941                 flowctrl |= FLOW_CTRL_TX;
1942
1943         return flowctrl;
1944 }
1945
1946 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1947 {
1948         u8 cap = 0;
1949
1950         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1951                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1952         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1953                 if (lcladv & ADVERTISE_1000XPAUSE)
1954                         cap = FLOW_CTRL_RX;
1955                 if (rmtadv & ADVERTISE_1000XPAUSE)
1956                         cap = FLOW_CTRL_TX;
1957         }
1958
1959         return cap;
1960 }
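/*
 * Sketch: the 802.3x pause resolution implemented by
 * tg3_resolve_flowctrl_1000X() -- symmetric pause on both ends
 * enables both directions; otherwise the asymmetric-pause bits decide
 * which side may send PAUSE frames.  The bit values below are local
 * stand-ins for the ADVERTISE_1000X* and FLOW_CTRL_* constants.
 */
#include <stdio.h>

#define ADV_PAUSE       0x1     /* stands in for ADVERTISE_1000XPAUSE */
#define ADV_ASYM        0x2     /* stands in for ADVERTISE_1000XPSE_ASYM */
#define FC_TX           0x1
#define FC_RX           0x2

static unsigned int resolve(unsigned int lcl, unsigned int rmt)
{
        unsigned int cap = 0;

        if (lcl & rmt & ADV_PAUSE) {
                cap = FC_TX | FC_RX;
        } else if (lcl & rmt & ADV_ASYM) {
                if (lcl & ADV_PAUSE)
                        cap = FC_RX;
                if (rmt & ADV_PAUSE)
                        cap = FC_TX;
        }
        return cap;
}

int main(void)
{
        printf("%u\n", resolve(ADV_PAUSE, ADV_PAUSE));            /* 3 */
        printf("%u\n", resolve(ADV_PAUSE | ADV_ASYM, ADV_ASYM));  /* 2 */
        return 0;
}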
1961
1962 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1963 {
1964         u8 autoneg;
1965         u8 flowctrl = 0;
1966         u32 old_rx_mode = tp->rx_mode;
1967         u32 old_tx_mode = tp->tx_mode;
1968
1969         if (tg3_flag(tp, USE_PHYLIB))
1970                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1971         else
1972                 autoneg = tp->link_config.autoneg;
1973
1974         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1975                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1976                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1977                 else
1978                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1979         } else
1980                 flowctrl = tp->link_config.flowctrl;
1981
1982         tp->link_config.active_flowctrl = flowctrl;
1983
1984         if (flowctrl & FLOW_CTRL_RX)
1985                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1986         else
1987                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1988
1989         if (old_rx_mode != tp->rx_mode)
1990                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1991
1992         if (flowctrl & FLOW_CTRL_TX)
1993                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1994         else
1995                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1996
1997         if (old_tx_mode != tp->tx_mode)
1998                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1999 }
2000
2001 static void tg3_adjust_link(struct net_device *dev)
2002 {
2003         u8 oldflowctrl, linkmesg = 0;
2004         u32 mac_mode, lcl_adv, rmt_adv;
2005         struct tg3 *tp = netdev_priv(dev);
2006         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2007
2008         spin_lock_bh(&tp->lock);
2009
2010         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2011                                     MAC_MODE_HALF_DUPLEX);
2012
2013         oldflowctrl = tp->link_config.active_flowctrl;
2014
2015         if (phydev->link) {
2016                 lcl_adv = 0;
2017                 rmt_adv = 0;
2018
2019                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2020                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2021                 else if (phydev->speed == SPEED_1000 ||
2022                          tg3_asic_rev(tp) != ASIC_REV_5785)
2023                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2024                 else
2025                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2026
2027                 if (phydev->duplex == DUPLEX_HALF)
2028                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2029                 else {
2030                         lcl_adv = mii_advertise_flowctrl(
2031                                   tp->link_config.flowctrl);
2032
2033                         if (phydev->pause)
2034                                 rmt_adv = LPA_PAUSE_CAP;
2035                         if (phydev->asym_pause)
2036                                 rmt_adv |= LPA_PAUSE_ASYM;
2037                 }
2038
2039                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2040         } else
2041                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2042
2043         if (mac_mode != tp->mac_mode) {
2044                 tp->mac_mode = mac_mode;
2045                 tw32_f(MAC_MODE, tp->mac_mode);
2046                 udelay(40);
2047         }
2048
2049         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2050                 if (phydev->speed == SPEED_10)
2051                         tw32(MAC_MI_STAT,
2052                              MAC_MI_STAT_10MBPS_MODE |
2053                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054                 else
2055                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2056         }
2057
2058         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2059                 tw32(MAC_TX_LENGTHS,
2060                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2061                       (6 << TX_LENGTHS_IPG_SHIFT) |
2062                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2063         else
2064                 tw32(MAC_TX_LENGTHS,
2065                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2066                       (6 << TX_LENGTHS_IPG_SHIFT) |
2067                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2068
2069         if (phydev->link != tp->old_link ||
2070             phydev->speed != tp->link_config.active_speed ||
2071             phydev->duplex != tp->link_config.active_duplex ||
2072             oldflowctrl != tp->link_config.active_flowctrl)
2073                 linkmesg = 1;
2074
2075         tp->old_link = phydev->link;
2076         tp->link_config.active_speed = phydev->speed;
2077         tp->link_config.active_duplex = phydev->duplex;
2078
2079         spin_unlock_bh(&tp->lock);
2080
2081         if (linkmesg)
2082                 tg3_link_report(tp);
2083 }
2084
2085 static int tg3_phy_init(struct tg3 *tp)
2086 {
2087         struct phy_device *phydev;
2088
2089         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2090                 return 0;
2091
2092         /* Bring the PHY back to a known state. */
2093         tg3_bmcr_reset(tp);
2094
2095         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2096
2097         /* Attach the MAC to the PHY. */
2098         phydev = phy_connect(tp->dev, phydev_name(phydev),
2099                              tg3_adjust_link, phydev->interface);
2100         if (IS_ERR(phydev)) {
2101                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2102                 return PTR_ERR(phydev);
2103         }
2104
2105         /* Mask with MAC supported features. */
2106         switch (phydev->interface) {
2107         case PHY_INTERFACE_MODE_GMII:
2108         case PHY_INTERFACE_MODE_RGMII:
2109                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2110                         phydev->supported &= (PHY_GBIT_FEATURES |
2111                                               SUPPORTED_Pause |
2112                                               SUPPORTED_Asym_Pause);
2113                         break;
2114                 }
2115                 /* fallthru */
2116         case PHY_INTERFACE_MODE_MII:
2117                 phydev->supported &= (PHY_BASIC_FEATURES |
2118                                       SUPPORTED_Pause |
2119                                       SUPPORTED_Asym_Pause);
2120                 break;
2121         default:
2122                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2123                 return -EINVAL;
2124         }
2125
2126         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2127
2128         phydev->advertising = phydev->supported;
2129
2130         phy_attached_info(phydev);
2131
2132         return 0;
2133 }
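/*
 * Sketch: the feature-masking step at the end of tg3_phy_init() --
 * start from what the PHY reports, AND it down to what the MAC can
 * drive, and advertise exactly the result.  The FEAT_* bits are
 * invented for the illustration.
 */
#include <stdio.h>

#define FEAT_10_100     0x01
#define FEAT_1000       0x02
#define FEAT_PAUSE      0x04

int main(void)
{
        unsigned int supported = FEAT_10_100 | FEAT_1000 | FEAT_PAUSE;
        unsigned int mac_allows = FEAT_10_100 | FEAT_PAUSE; /* 10/100-only MAC */
        unsigned int advertising;

        supported &= mac_allows;        /* drop 1000BASE-T */
        advertising = supported;        /* advertise what is left */
        printf("adv=0x%02x\n", advertising);    /* adv=0x05 */
        return 0;
}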
2134
2135 static void tg3_phy_start(struct tg3 *tp)
2136 {
2137         struct phy_device *phydev;
2138
2139         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2140                 return;
2141
2142         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2143
2144         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2145                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2146                 phydev->speed = tp->link_config.speed;
2147                 phydev->duplex = tp->link_config.duplex;
2148                 phydev->autoneg = tp->link_config.autoneg;
2149                 phydev->advertising = tp->link_config.advertising;
2150         }
2151
2152         phy_start(phydev);
2153
2154         phy_start_aneg(phydev);
2155 }
2156
2157 static void tg3_phy_stop(struct tg3 *tp)
2158 {
2159         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2160                 return;
2161
2162         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2163 }
2164
2165 static void tg3_phy_fini(struct tg3 *tp)
2166 {
2167         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2168                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2169                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2170         }
2171 }
2172
2173 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2174 {
2175         int err;
2176         u32 val;
2177
2178         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2179                 return 0;
2180
2181         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2182                 /* Cannot do read-modify-write on 5401 */
2183                 err = tg3_phy_auxctl_write(tp,
2184                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2185                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2186                                            0x4c20);
2187                 goto done;
2188         }
2189
2190         err = tg3_phy_auxctl_read(tp,
2191                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2192         if (err)
2193                 return err;
2194
2195         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2196         err = tg3_phy_auxctl_write(tp,
2197                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2198
2199 done:
2200         return err;
2201 }
2202
2203 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2204 {
2205         u32 phytest;
2206
2207         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2208                 u32 phy;
2209
2210                 tg3_writephy(tp, MII_TG3_FET_TEST,
2211                              phytest | MII_TG3_FET_SHADOW_EN);
2212                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2213                         if (enable)
2214                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2215                         else
2216                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2218                 }
2219                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2220         }
2221 }
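/*
 * Sketch: the shadow-register access pattern in
 * tg3_phy_fet_toggle_apd() -- set SHADOW_EN in the TEST register to
 * expose the shadow bank, read-modify-write the shadow register, then
 * restore TEST to hide the bank again.  The register numbers and bit
 * values below are invented; only the access shape matches.
 */
#include <stdint.h>
#include <stdio.h>

#define TEST_REG        0x1f
#define SHADOW_EN       0x0080
#define SHDW_REG        0x1b
#define APD_BIT         0x0020

static uint16_t regs[32], shadow[32];

static uint16_t rd(int reg)
{
        if (reg != TEST_REG && (regs[TEST_REG] & SHADOW_EN))
                return shadow[reg];
        return regs[reg];
}

static void wr(int reg, uint16_t val)
{
        if (reg != TEST_REG && (regs[TEST_REG] & SHADOW_EN))
                shadow[reg] = val;
        else
                regs[reg] = val;
}

static void toggle_apd(int enable)
{
        uint16_t test = rd(TEST_REG);

        wr(TEST_REG, test | SHADOW_EN);         /* expose shadow bank */
        if (enable)
                wr(SHDW_REG, rd(SHDW_REG) | APD_BIT);
        else
                wr(SHDW_REG, rd(SHDW_REG) & ~APD_BIT);
        wr(TEST_REG, test);                     /* hide the bank again */
}

int main(void)
{
        toggle_apd(1);
        printf("apd=%d\n", !!(shadow[SHDW_REG] & APD_BIT));     /* apd=1 */
        return 0;
}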
2222
2223 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2224 {
2225         u32 reg;
2226
2227         if (!tg3_flag(tp, 5705_PLUS) ||
2228             (tg3_flag(tp, 5717_PLUS) &&
2229              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2230                 return;
2231
2232         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2233                 tg3_phy_fet_toggle_apd(tp, enable);
2234                 return;
2235         }
2236
2237         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2238               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2239               MII_TG3_MISC_SHDW_SCR5_SDTL |
2240               MII_TG3_MISC_SHDW_SCR5_C125OE;
2241         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2242                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2243
2244         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2245
2246
2247         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2248         if (enable)
2249                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2250
2251         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2252 }
2253
2254 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2255 {
2256         u32 phy;
2257
2258         if (!tg3_flag(tp, 5705_PLUS) ||
2259             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2260                 return;
2261
2262         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2263                 u32 ephy;
2264
2265                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2266                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2267
2268                         tg3_writephy(tp, MII_TG3_FET_TEST,
2269                                      ephy | MII_TG3_FET_SHADOW_EN);
2270                         if (!tg3_readphy(tp, reg, &phy)) {
2271                                 if (enable)
2272                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273                                 else
2274                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275                                 tg3_writephy(tp, reg, phy);
2276                         }
2277                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2278                 }
2279         } else {
2280                 int ret;
2281
2282                 ret = tg3_phy_auxctl_read(tp,
2283                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2284                 if (!ret) {
2285                         if (enable)
2286                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287                         else
2288                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289                         tg3_phy_auxctl_write(tp,
2290                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2291                 }
2292         }
2293 }
2294
2295 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2296 {
2297         int ret;
2298         u32 val;
2299
2300         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2301                 return;
2302
2303         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2304         if (!ret)
2305                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2306                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2307 }
2308
2309 static void tg3_phy_apply_otp(struct tg3 *tp)
2310 {
2311         u32 otp, phy;
2312
2313         if (!tp->phy_otp)
2314                 return;
2315
2316         otp = tp->phy_otp;
2317
2318         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2319                 return;
2320
2321         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2322         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2323         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2324
2325         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2326               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2327         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2328
2329         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2330         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2331         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2332
2333         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2334         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2335
2336         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2337         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2338
2339         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2340               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2341         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2342
2343         tg3_phy_toggle_auxctl_smdsp(tp, false);
2344 }
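/*
 * Sketch: the mask-then-shift field extraction tg3_phy_apply_otp()
 * uses on the 32-bit OTP word.  The field layout below is invented;
 * the real TG3_OTP_*_MASK/_SHIFT pairs live in tg3.h.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK      0x00ff0000u     /* invented 8-bit field */
#define FIELD_SHIFT     16

static uint32_t otp_field(uint32_t otp, uint32_t mask, int shift)
{
        return (otp & mask) >> shift;
}

int main(void)
{
        uint32_t otp = 0x12ab34cdu;

        printf("0x%02x\n", otp_field(otp, FIELD_MASK, FIELD_SHIFT)); /* 0xab */
        return 0;
}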
2345
2346 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2347 {
2348         u32 val;
2349         struct ethtool_eee *dest = &tp->eee;
2350
2351         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2352                 return;
2353
2354         if (eee)
2355                 dest = eee;
2356
2357         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2358                 return;
2359
2360         /* Pull eee_active */
2361         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2362             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2363                 dest->eee_active = 1;
2364         } else
2365                 dest->eee_active = 0;
2366
2367         /* Pull lp advertised settings */
2368         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2369                 return;
2370         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2371
2372         /* Pull advertised and eee_enabled settings */
2373         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2374                 return;
2375         dest->eee_enabled = !!val;
2376         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2377
2378         /* Pull tx_lpi_enabled */
2379         val = tr32(TG3_CPMU_EEE_MODE);
2380         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2381
2382         /* Pull lpi timer value */
2383         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2384 }
2385
2386 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2387 {
2388         u32 val;
2389
2390         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2391                 return;
2392
2393         tp->setlpicnt = 0;
2394
2395         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2396             current_link_up &&
2397             tp->link_config.active_duplex == DUPLEX_FULL &&
2398             (tp->link_config.active_speed == SPEED_100 ||
2399              tp->link_config.active_speed == SPEED_1000)) {
2400                 u32 eeectl;
2401
2402                 if (tp->link_config.active_speed == SPEED_1000)
2403                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2404                 else
2405                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2406
2407                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2408
2409                 tg3_eee_pull_config(tp, NULL);
2410                 if (tp->eee.eee_active)
2411                         tp->setlpicnt = 2;
2412         }
2413
2414         if (!tp->setlpicnt) {
2415                 if (current_link_up &&
2416                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2417                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2418                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2419                 }
2420
2421                 val = tr32(TG3_CPMU_EEE_MODE);
2422                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2423         }
2424 }
2425
2426 static void tg3_phy_eee_enable(struct tg3 *tp)
2427 {
2428         u32 val;
2429
2430         if (tp->link_config.active_speed == SPEED_1000 &&
2431             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2432              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2433              tg3_flag(tp, 57765_CLASS)) &&
2434             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2435                 val = MII_TG3_DSP_TAP26_ALNOKO |
2436                       MII_TG3_DSP_TAP26_RMRXSTO;
2437                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2438                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2439         }
2440
2441         val = tr32(TG3_CPMU_EEE_MODE);
2442         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2443 }
2444
2445 static int tg3_wait_macro_done(struct tg3 *tp)
2446 {
2447         int limit = 100;
2448
2449         while (limit--) {
2450                 u32 tmp32;
2451
2452                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2453                         if ((tmp32 & 0x1000) == 0)
2454                                 break;
2455                 }
2456         }
2457         if (limit < 0)
2458                 return -EBUSY;
2459
2460         return 0;
2461 }
2462
2463 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2464 {
2465         static const u32 test_pat[4][6] = {
2466         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2467         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2468         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2469         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2470         };
2471         int chan;
2472
2473         for (chan = 0; chan < 4; chan++) {
2474                 int i;
2475
2476                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477                              (chan * 0x2000) | 0x0200);
2478                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479
2480                 for (i = 0; i < 6; i++)
2481                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2482                                      test_pat[chan][i]);
2483
2484                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2485                 if (tg3_wait_macro_done(tp)) {
2486                         *resetp = 1;
2487                         return -EBUSY;
2488                 }
2489
2490                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2491                              (chan * 0x2000) | 0x0200);
2492                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2493                 if (tg3_wait_macro_done(tp)) {
2494                         *resetp = 1;
2495                         return -EBUSY;
2496                 }
2497
2498                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2499                 if (tg3_wait_macro_done(tp)) {
2500                         *resetp = 1;
2501                         return -EBUSY;
2502                 }
2503
2504                 for (i = 0; i < 6; i += 2) {
2505                         u32 low, high;
2506
2507                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2508                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2509                             tg3_wait_macro_done(tp)) {
2510                                 *resetp = 1;
2511                                 return -EBUSY;
2512                         }
2513                         low &= 0x7fff;
2514                         high &= 0x000f;
2515                         if (low != test_pat[chan][i] ||
2516                             high != test_pat[chan][i+1]) {
2517                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2518                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2519                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2520
2521                                 return -EBUSY;
2522                         }
2523                 }
2524         }
2525
2526         return 0;
2527 }
2528
2529 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2530 {
2531         int chan;
2532
2533         for (chan = 0; chan < 4; chan++) {
2534                 int i;
2535
2536                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2537                              (chan * 0x2000) | 0x0200);
2538                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2539                 for (i = 0; i < 6; i++)
2540                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2541                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2542                 if (tg3_wait_macro_done(tp))
2543                         return -EBUSY;
2544         }
2545
2546         return 0;
2547 }
2548
2549 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2550 {
2551         u32 reg32, phy9_orig;
2552         int retries, do_phy_reset, err;
2553
2554         retries = 10;
2555         do_phy_reset = 1;
2556         do {
2557                 if (do_phy_reset) {
2558                         err = tg3_bmcr_reset(tp);
2559                         if (err)
2560                                 return err;
2561                         do_phy_reset = 0;
2562                 }
2563
2564                 /* Disable transmitter and interrupt.  */
2565                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2566                         continue;
2567
2568                 reg32 |= 0x3000;
2569                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2570
2571                 /* Set full-duplex, 1000 mbps.  */
2572                 tg3_writephy(tp, MII_BMCR,
2573                              BMCR_FULLDPLX | BMCR_SPEED1000);
2574
2575                 /* Set to master mode.  */
2576                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2577                         continue;
2578
2579                 tg3_writephy(tp, MII_CTRL1000,
2580                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2581
2582                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2583                 if (err)
2584                         return err;
2585
2586                 /* Block the PHY control access.  */
2587                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2588
2589                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2590                 if (!err)
2591                         break;
2592         } while (--retries);
2593
2594         err = tg3_phy_reset_chanpat(tp);
2595         if (err)
2596                 return err;
2597
2598         tg3_phydsp_write(tp, 0x8005, 0x0000);
2599
2600         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2601         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2602
2603         tg3_phy_toggle_auxctl_smdsp(tp, false);
2604
2605         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2606
2607         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2608         if (err)
2609                 return err;
2610
2611         reg32 &= ~0x3000;
2612         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2613
2614         return 0;
2615 }
2616
2617 static void tg3_carrier_off(struct tg3 *tp)
2618 {
2619         netif_carrier_off(tp->dev);
2620         tp->link_up = false;
2621 }
2622
2623 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2624 {
2625         if (tg3_flag(tp, ENABLE_ASF))
2626                 netdev_warn(tp->dev,
2627                             "Management side-band traffic will be interrupted during phy settings change\n");
2628 }
2629
2630 /* Reset the tigon3 PHY unconditionally and reapply the chip-specific
2631  * workarounds and tuning that a reset clears.
2632  */
2633 static int tg3_phy_reset(struct tg3 *tp)
2634 {
2635         u32 val, cpmuctrl;
2636         int err;
2637
2638         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2639                 val = tr32(GRC_MISC_CFG);
2640                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2641                 udelay(40);
2642         }
2643         err  = tg3_readphy(tp, MII_BMSR, &val);
2644         err |= tg3_readphy(tp, MII_BMSR, &val);
2645         if (err != 0)
2646                 return -EBUSY;
2647
2648         if (netif_running(tp->dev) && tp->link_up) {
2649                 netif_carrier_off(tp->dev);
2650                 tg3_link_report(tp);
2651         }
2652
2653         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2654             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2655             tg3_asic_rev(tp) == ASIC_REV_5705) {
2656                 err = tg3_phy_reset_5703_4_5(tp);
2657                 if (err)
2658                         return err;
2659                 goto out;
2660         }
2661
2662         cpmuctrl = 0;
2663         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2664             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2665                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2666                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2667                         tw32(TG3_CPMU_CTRL,
2668                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2669         }
2670
2671         err = tg3_bmcr_reset(tp);
2672         if (err)
2673                 return err;
2674
2675         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2676                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2677                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2678
2679                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2680         }
2681
2682         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2683             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2684                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2685                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2686                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2687                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2688                         udelay(40);
2689                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2690                 }
2691         }
2692
2693         if (tg3_flag(tp, 5717_PLUS) &&
2694             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2695                 return 0;
2696
2697         tg3_phy_apply_otp(tp);
2698
2699         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2700                 tg3_phy_toggle_apd(tp, true);
2701         else
2702                 tg3_phy_toggle_apd(tp, false);
2703
2704 out:
2705         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2706             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2707                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2708                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2709                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2710         }
2711
2712         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2713                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2714                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715         }
2716
2717         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2718                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2719                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2720                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2721                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2722                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2723                 }
2724         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2725                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2726                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2727                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2728                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2729                                 tg3_writephy(tp, MII_TG3_TEST1,
2730                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2731                         } else
2732                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2733
2734                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2735                 }
2736         }
2737
2738         /* Set Extended packet length bit (bit 14) on all chips that
2739          * support jumbo frames. */
2740         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2741                 /* Cannot do read-modify-write on 5401 */
2742                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2743         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2744                 /* Set bit 14 with read-modify-write to preserve other bits */
2745                 err = tg3_phy_auxctl_read(tp,
2746                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2747                 if (!err)
2748                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2749                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2750         }
2751
2752         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2753          * jumbo frames transmission.
2754          */
2755         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2756                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2757                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2758                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2759         }
2760
2761         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2762                 /* adjust output voltage */
2763                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2764         }
2765
2766         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2767                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2768
2769         tg3_phy_toggle_automdix(tp, true);
2770         tg3_phy_set_wirespeed(tp);
2771         return 0;
2772 }
2773
2774 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2775 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2776 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2777                                           TG3_GPIO_MSG_NEED_VAUX)
2778 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2779         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2780          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2781          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2782          (TG3_GPIO_MSG_DRVR_PRES << 12))
2783
2784 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2785         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2786          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2787          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2788          (TG3_GPIO_MSG_NEED_VAUX << 12))
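/*
 * Sketch: the per-function packing behind these masks -- the status
 * word holds one 4-bit nibble per PCI function, and the two low bits
 * of each nibble are the driver-present and need-vaux flags that
 * tg3_set_function_status() below updates for its own function only.
 * Standalone model assuming four functions and omitting the
 * TG3_APE_GPIO_MSG_SHIFT base offset the driver applies.
 */
#include <stdint.h>
#include <stdio.h>

#define MSG_DRVR_PRES   0x1
#define MSG_NEED_VAUX   0x2
#define MSG_MASK        (MSG_DRVR_PRES | MSG_NEED_VAUX)

static uint32_t set_fn_status(uint32_t status, int fn, uint32_t newstat)
{
        int shift = 4 * fn;

        status &= ~((uint32_t)MSG_MASK << shift);       /* clear our nibble */
        status |= newstat << shift;                     /* install new bits */
        return status;
}

int main(void)
{
        uint32_t status = 0;

        status = set_fn_status(status, 0, MSG_DRVR_PRES);
        status = set_fn_status(status, 2, MSG_DRVR_PRES | MSG_NEED_VAUX);
        printf("0x%04x\n", status);     /* 0x0301 */
        return 0;
}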
2789
2790 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2791 {
2792         u32 status, shift;
2793
2794         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2795             tg3_asic_rev(tp) == ASIC_REV_5719)
2796                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2797         else
2798                 status = tr32(TG3_CPMU_DRV_STATUS);
2799
2800         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2801         status &= ~(TG3_GPIO_MSG_MASK << shift);
2802         status |= (newstat << shift);
2803
2804         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2805             tg3_asic_rev(tp) == ASIC_REV_5719)
2806                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2807         else
2808                 tw32(TG3_CPMU_DRV_STATUS, status);
2809
2810         return status >> TG3_APE_GPIO_MSG_SHIFT;
2811 }
2812
2813 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2814 {
2815         if (!tg3_flag(tp, IS_NIC))
2816                 return 0;
2817
2818         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2819             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2820             tg3_asic_rev(tp) == ASIC_REV_5720) {
2821                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2822                         return -EIO;
2823
2824                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2825
2826                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2828
2829                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2830         } else {
2831                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2832                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2833         }
2834
2835         return 0;
2836 }
2837
2838 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2839 {
2840         u32 grc_local_ctrl;
2841
2842         if (!tg3_flag(tp, IS_NIC) ||
2843             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2844             tg3_asic_rev(tp) == ASIC_REV_5701)
2845                 return;
2846
2847         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2848
2849         tw32_wait_f(GRC_LOCAL_CTRL,
2850                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2851                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2852
2853         tw32_wait_f(GRC_LOCAL_CTRL,
2854                     grc_local_ctrl,
2855                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2856
2857         tw32_wait_f(GRC_LOCAL_CTRL,
2858                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2859                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2860 }
2861
2862 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2863 {
2864         if (!tg3_flag(tp, IS_NIC))
2865                 return;
2866
2867         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2868             tg3_asic_rev(tp) == ASIC_REV_5701) {
2869                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2870                             (GRC_LCLCTRL_GPIO_OE0 |
2871                              GRC_LCLCTRL_GPIO_OE1 |
2872                              GRC_LCLCTRL_GPIO_OE2 |
2873                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2874                              GRC_LCLCTRL_GPIO_OUTPUT1),
2875                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2876         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2877                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2878                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2879                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2880                                      GRC_LCLCTRL_GPIO_OE1 |
2881                                      GRC_LCLCTRL_GPIO_OE2 |
2882                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2883                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2884                                      tp->grc_local_ctrl;
2885                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2887
2888                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2889                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2891
2892                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2893                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2894                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2895         } else {
2896                 u32 no_gpio2;
2897                 u32 grc_local_ctrl = 0;
2898
2899                 /* Workaround to prevent drawing too much current. */
2900                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2901                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2902                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2903                                     grc_local_ctrl,
2904                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2905                 }
2906
2907                 /* On 5753 and variants, GPIO2 cannot be used. */
2908                 no_gpio2 = tp->nic_sram_data_cfg &
2909                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2910
2911                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2912                                   GRC_LCLCTRL_GPIO_OE1 |
2913                                   GRC_LCLCTRL_GPIO_OE2 |
2914                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2915                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2916                 if (no_gpio2) {
2917                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2918                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2919                 }
2920                 tw32_wait_f(GRC_LOCAL_CTRL,
2921                             tp->grc_local_ctrl | grc_local_ctrl,
2922                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2923
2924                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2925
2926                 tw32_wait_f(GRC_LOCAL_CTRL,
2927                             tp->grc_local_ctrl | grc_local_ctrl,
2928                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2929
2930                 if (!no_gpio2) {
2931                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2932                         tw32_wait_f(GRC_LOCAL_CTRL,
2933                                     tp->grc_local_ctrl | grc_local_ctrl,
2934                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2935                 }
2936         }
2937 }
2938
2939 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2940 {
2941         u32 msg = 0;
2942
2943         /* Serialize power state transitions */
2944         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2945                 return;
2946
2947         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2948                 msg = TG3_GPIO_MSG_NEED_VAUX;
2949
2950         msg = tg3_set_function_status(tp, msg);
2951
2952         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2953                 goto done;
2954
2955         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2956                 tg3_pwrsrc_switch_to_vaux(tp);
2957         else
2958                 tg3_pwrsrc_die_with_vmain(tp);
2959
2960 done:
2961         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2962 }
2963
2964 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2965 {
2966         bool need_vaux = false;
2967
2968         /* The GPIOs do something completely different on 57765. */
2969         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2970                 return;
2971
2972         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2973             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2974             tg3_asic_rev(tp) == ASIC_REV_5720) {
2975                 tg3_frob_aux_power_5717(tp, include_wol ?
2976                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2977                 return;
2978         }
2979
2980         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2981                 struct net_device *dev_peer;
2982
2983                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2984
2985                 /* remove_one() may have been run on the peer. */
2986                 if (dev_peer) {
2987                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2988
2989                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2990                                 return;
2991
2992                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2993                             tg3_flag(tp_peer, ENABLE_ASF))
2994                                 need_vaux = true;
2995                 }
2996         }
2997
2998         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2999             tg3_flag(tp, ENABLE_ASF))
3000                 need_vaux = true;
3001
3002         if (need_vaux)
3003                 tg3_pwrsrc_switch_to_vaux(tp);
3004         else
3005                 tg3_pwrsrc_die_with_vmain(tp);
3006 }
3007
3008 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3009 {
3010         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3011                 return 1;
3012         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3013                 if (speed != SPEED_10)
3014                         return 1;
3015         } else if (speed == SPEED_10)
3016                 return 1;
3017
3018         return 0;
3019 }
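
/* Summary of the polarity rules above, for reference: in
 * LED_CTRL_MODE_PHY_2 the polarity bit is always set; with a BCM5411
 * PHY it is set at every speed except 10 Mbps; with any other PHY it
 * is set only at 10 Mbps.
 */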
3020
3021 static bool tg3_phy_power_bug(struct tg3 *tp)
3022 {
3023         switch (tg3_asic_rev(tp)) {
3024         case ASIC_REV_5700:
3025         case ASIC_REV_5704:
3026                 return true;
3027         case ASIC_REV_5780:
3028                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3029                         return true;
3030                 return false;
3031         case ASIC_REV_5717:
3032                 if (!tp->pci_fn)
3033                         return true;
3034                 return false;
3035         case ASIC_REV_5719:
3036         case ASIC_REV_5720:
3037                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3038                     !tp->pci_fn)
3039                         return true;
3040                 return false;
3041         }
3042
3043         return false;
3044 }
3045
3046 static bool tg3_phy_led_bug(struct tg3 *tp)
3047 {
3048         switch (tg3_asic_rev(tp)) {
3049         case ASIC_REV_5719:
3050         case ASIC_REV_5720:
3051                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3052                     !tp->pci_fn)
3053                         return true;
3054                 return false;
3055         }
3056
3057         return false;
3058 }
3059
3060 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3061 {
3062         u32 val;
3063
3064         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3065                 return;
3066
3067         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3068                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3069                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3070                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3071
3072                         sg_dig_ctrl |=
3073                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3074                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3075                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3076                 }
3077                 return;
3078         }
3079
3080         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3081                 tg3_bmcr_reset(tp);
3082                 val = tr32(GRC_MISC_CFG);
3083                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3084                 udelay(40);
3085                 return;
3086         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3087                 u32 phytest;
3088                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3089                         u32 phy;
3090
3091                         tg3_writephy(tp, MII_ADVERTISE, 0);
3092                         tg3_writephy(tp, MII_BMCR,
3093                                      BMCR_ANENABLE | BMCR_ANRESTART);
3094
3095                         tg3_writephy(tp, MII_TG3_FET_TEST,
3096                                      phytest | MII_TG3_FET_SHADOW_EN);
3097                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3098                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3099                                 tg3_writephy(tp,
3100                                              MII_TG3_FET_SHDW_AUXMODE4,
3101                                              phy);
3102                         }
3103                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3104                 }
3105                 return;
3106         } else if (do_low_power) {
3107                 if (!tg3_phy_led_bug(tp))
3108                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3109                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3110
3111                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3112                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3113                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3114                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3115         }
3116
3117         /* The PHY should not be powered down on some chips because
3118          * of bugs.
3119          */
3120         if (tg3_phy_power_bug(tp))
3121                 return;
3122
3123         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3124             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3125                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3126                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3127                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3128                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3129         }
3130
3131         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3132 }
3133
3134 /* tp->lock is held. */
3135 static int tg3_nvram_lock(struct tg3 *tp)
3136 {
3137         if (tg3_flag(tp, NVRAM)) {
3138                 int i;
3139
3140                 if (tp->nvram_lock_cnt == 0) {
3141                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3142                         for (i = 0; i < 8000; i++) {
3143                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3144                                         break;
3145                                 udelay(20);
3146                         }
3147                         if (i == 8000) {
3148                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3149                                 return -ENODEV;
3150                         }
3151                 }
3152                 tp->nvram_lock_cnt++;
3153         }
3154         return 0;
3155 }
3156
3157 /* tp->lock is held. */
3158 static void tg3_nvram_unlock(struct tg3 *tp)
3159 {
3160         if (tg3_flag(tp, NVRAM)) {
3161                 if (tp->nvram_lock_cnt > 0)
3162                         tp->nvram_lock_cnt--;
3163                 if (tp->nvram_lock_cnt == 0)
3164                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3165         }
3166 }
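
/* A minimal usage sketch (not part of the driver): the arbitration
 * lock above is refcounted, so nested acquisitions touch the SWARB
 * semaphore only on the outermost pair.  As with the functions above,
 * tp->lock must be held.
 */
static void __maybe_unused tg3_nvram_lock_nesting_sketch(struct tg3 *tp)
{
        if (tg3_nvram_lock(tp))         /* cnt 0 -> 1: requests SWARB_REQ_SET1 */
                return;                 /* arbitration timed out (-ENODEV) */
        tg3_nvram_lock(tp);             /* cnt 1 -> 2: no register access */
        tg3_nvram_unlock(tp);           /* cnt 2 -> 1: no register access */
        tg3_nvram_unlock(tp);           /* cnt 1 -> 0: writes SWARB_REQ_CLR1 */
}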
3167
3168 /* tp->lock is held. */
3169 static void tg3_enable_nvram_access(struct tg3 *tp)
3170 {
3171         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3172                 u32 nvaccess = tr32(NVRAM_ACCESS);
3173
3174                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3175         }
3176 }
3177
3178 /* tp->lock is held. */
3179 static void tg3_disable_nvram_access(struct tg3 *tp)
3180 {
3181         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3182                 u32 nvaccess = tr32(NVRAM_ACCESS);
3183
3184                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3185         }
3186 }
3187
3188 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3189                                         u32 offset, u32 *val)
3190 {
3191         u32 tmp;
3192         int i;
3193
3194         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3195                 return -EINVAL;
3196
3197         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3198                                         EEPROM_ADDR_DEVID_MASK |
3199                                         EEPROM_ADDR_READ);
3200         tw32(GRC_EEPROM_ADDR,
3201              tmp |
3202              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3203              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3204               EEPROM_ADDR_ADDR_MASK) |
3205              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3206
3207         for (i = 0; i < 1000; i++) {
3208                 tmp = tr32(GRC_EEPROM_ADDR);
3209
3210                 if (tmp & EEPROM_ADDR_COMPLETE)
3211                         break;
3212                 msleep(1);
3213         }
3214         if (!(tmp & EEPROM_ADDR_COMPLETE))
3215                 return -EBUSY;
3216
3217         tmp = tr32(GRC_EEPROM_DATA);
3218
3219         /*
3220          * The data will always be opposite the native endian
3221          * format.  Perform a blind byteswap to compensate.
3222          */
3223         *val = swab32(tmp);
3224
3225         return 0;
3226 }
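
/* Worked example of the blind byteswap above (stored value assumed
 * for illustration): if the word at @offset is 0x12345678 then, per
 * the comment above, the raw GRC_EEPROM_DATA value is its
 * byte-reverse 0x78563412 regardless of host endianness, and
 * swab32() recovers *val == 0x12345678.
 */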
3227
3228 #define NVRAM_CMD_TIMEOUT 5000
3229
3230 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3231 {
3232         int i;
3233
3234         tw32(NVRAM_CMD, nvram_cmd);
3235         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3236                 usleep_range(10, 40);
3237                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3238                         udelay(10);
3239                         break;
3240                 }
3241         }
3242
3243         if (i == NVRAM_CMD_TIMEOUT)
3244                 return -EBUSY;
3245
3246         return 0;
3247 }
3248
3249 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3250 {
3251         if (tg3_flag(tp, NVRAM) &&
3252             tg3_flag(tp, NVRAM_BUFFERED) &&
3253             tg3_flag(tp, FLASH) &&
3254             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3255             (tp->nvram_jedecnum == JEDEC_ATMEL))
3256
3257                 addr = ((addr / tp->nvram_pagesize) <<
3258                         ATMEL_AT45DB0X1B_PAGE_POS) +
3259                        (addr % tp->nvram_pagesize);
3260
3261         return addr;
3262 }
3263
3264 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3265 {
3266         if (tg3_flag(tp, NVRAM) &&
3267             tg3_flag(tp, NVRAM_BUFFERED) &&
3268             tg3_flag(tp, FLASH) &&
3269             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3270             (tp->nvram_jedecnum == JEDEC_ATMEL))
3271
3272                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3273                         tp->nvram_pagesize) +
3274                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3275
3276         return addr;
3277 }
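
/* Worked example of the two translations above, assuming the usual
 * AT45DB0X1B geometry (264-byte pages, ATMEL_AT45DB0X1B_PAGE_POS of
 * 9): linear address 1000 is page 1000 / 264 = 3, byte 1000 % 264 =
 * 208, so the physical address is (3 << 9) + 208 = 1744; feeding
 * 1744 to tg3_nvram_logical_addr() inverts this back to 1000.
 */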
3278
3279 /* NOTE: Data read in from NVRAM is byteswapped according to
3280  * the byteswapping settings for all other register accesses.
3281  * tg3 devices are BE devices, so on a BE machine, the data
3282  * returned will be exactly as it is seen in NVRAM.  On a LE
3283  * machine, the 32-bit value will be byteswapped.
3284  */
3285 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3286 {
3287         int ret;
3288
3289         if (!tg3_flag(tp, NVRAM))
3290                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3291
3292         offset = tg3_nvram_phys_addr(tp, offset);
3293
3294         if (offset > NVRAM_ADDR_MSK)
3295                 return -EINVAL;
3296
3297         ret = tg3_nvram_lock(tp);
3298         if (ret)
3299                 return ret;
3300
3301         tg3_enable_nvram_access(tp);
3302
3303         tw32(NVRAM_ADDR, offset);
3304         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3305                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3306
3307         if (ret == 0)
3308                 *val = tr32(NVRAM_RDDATA);
3309
3310         tg3_disable_nvram_access(tp);
3311
3312         tg3_nvram_unlock(tp);
3313
3314         return ret;
3315 }
3316
3317 /* Ensures NVRAM data is in bytestream format. */
3318 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3319 {
3320         u32 v;
3321         int res = tg3_nvram_read(tp, offset, &v);
3322         if (!res)
3323                 *val = cpu_to_be32(v);
3324         return res;
3325 }
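
/* Example of the bytestream guarantee (illustrative contents): if
 * NVRAM holds the bytes 'T' 'G' '3' '\0' at @offset, the four bytes
 * of *val appear in that same order in memory on both big- and
 * little-endian hosts, so callers may memcpy() results straight into
 * byte buffers.
 */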
3326
3327 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3328                                     u32 offset, u32 len, u8 *buf)
3329 {
3330         int i, j, rc = 0;
3331         u32 val;
3332
3333         for (i = 0; i < len; i += 4) {
3334                 u32 addr;
3335                 __be32 data;
3336
3337                 addr = offset + i;
3338
3339                 memcpy(&data, buf + i, 4);
3340
3341                 /*
3342                  * The SEEPROM interface expects the data to always be opposite
3343                  * the native endian format.  We accomplish this by reversing
3344                  * all the operations that would have been performed on the
3345                  * data from a call to tg3_nvram_read_be32().
3346                  */
3347                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3348
3349                 val = tr32(GRC_EEPROM_ADDR);
3350                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3351
3352                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3353                         EEPROM_ADDR_READ);
3354                 tw32(GRC_EEPROM_ADDR, val |
3355                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3356                         (addr & EEPROM_ADDR_ADDR_MASK) |
3357                         EEPROM_ADDR_START |
3358                         EEPROM_ADDR_WRITE);
3359
3360                 for (j = 0; j < 1000; j++) {
3361                         val = tr32(GRC_EEPROM_ADDR);
3362
3363                         if (val & EEPROM_ADDR_COMPLETE)
3364                                 break;
3365                         msleep(1);
3366                 }
3367                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3368                         rc = -EBUSY;
3369                         break;
3370                 }
3371         }
3372
3373         return rc;
3374 }
3375
3376 /* offset and length are dword aligned */
3377 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3378                 u8 *buf)
3379 {
3380         int ret = 0;
3381         u32 pagesize = tp->nvram_pagesize;
3382         u32 pagemask = pagesize - 1;
3383         u32 nvram_cmd;
3384         u8 *tmp;
3385
3386         tmp = kmalloc(pagesize, GFP_KERNEL);
3387         if (tmp == NULL)
3388                 return -ENOMEM;
3389
3390         while (len) {
3391                 int j;
3392                 u32 phy_addr, page_off, size;
3393
3394                 phy_addr = offset & ~pagemask;
3395
3396                 for (j = 0; j < pagesize; j += 4) {
3397                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3398                                                   (__be32 *) (tmp + j));
3399                         if (ret)
3400                                 break;
3401                 }
3402                 if (ret)
3403                         break;
3404
3405                 page_off = offset & pagemask;
3406                 size = pagesize;
3407                 if (len < size)
3408                         size = len;
3409
3410                 len -= size;
3411
3412                 memcpy(tmp + page_off, buf, size);
3413
3414                 offset = offset + (pagesize - page_off);
3415
3416                 tg3_enable_nvram_access(tp);
3417
3418                 /*
3419                  * Before we can erase the flash page, we need
3420                  * to issue a special "write enable" command.
3421                  */
3422                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3423
3424                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3425                         break;
3426
3427                 /* Erase the target page */
3428                 tw32(NVRAM_ADDR, phy_addr);
3429
3430                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3431                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3432
3433                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3434                         break;
3435
3436                 /* Issue another write enable to start the write. */
3437                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3438
3439                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3440                         break;
3441
3442                 for (j = 0; j < pagesize; j += 4) {
3443                         __be32 data;
3444
3445                         data = *((__be32 *) (tmp + j));
3446
3447                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3448
3449                         tw32(NVRAM_ADDR, phy_addr + j);
3450
3451                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3452                                 NVRAM_CMD_WR;
3453
3454                         if (j == 0)
3455                                 nvram_cmd |= NVRAM_CMD_FIRST;
3456                         else if (j == (pagesize - 4))
3457                                 nvram_cmd |= NVRAM_CMD_LAST;
3458
3459                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3460                         if (ret)
3461                                 break;
3462                 }
3463                 if (ret)
3464                         break;
3465         }
3466
3467         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3468         tg3_nvram_exec_cmd(tp, nvram_cmd);
3469
3470         kfree(tmp);
3471
3472         return ret;
3473 }
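
/* Worked example of the read-modify-write loop above, assuming a
 * 256-byte flash page: writing 16 bytes at offset 260 reads the
 * whole page back from phy_addr 256, merges the new bytes at
 * page_off 4, issues write-enable, erases the page, reprograms all
 * 256 bytes, and advances offset to the next page boundary, 512.
 */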
3474
3475 /* offset and length are dword aligned */
3476 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3477                 u8 *buf)
3478 {
3479         int i, ret = 0;
3480
3481         for (i = 0; i < len; i += 4, offset += 4) {
3482                 u32 page_off, phy_addr, nvram_cmd;
3483                 __be32 data;
3484
3485                 memcpy(&data, buf + i, 4);
3486                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3487
3488                 page_off = offset % tp->nvram_pagesize;
3489
3490                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3491
3492                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3493
3494                 if (page_off == 0 || i == 0)
3495                         nvram_cmd |= NVRAM_CMD_FIRST;
3496                 if (page_off == (tp->nvram_pagesize - 4))
3497                         nvram_cmd |= NVRAM_CMD_LAST;
3498
3499                 if (i == (len - 4))
3500                         nvram_cmd |= NVRAM_CMD_LAST;
3501
3502                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3503                     !tg3_flag(tp, FLASH) ||
3504                     !tg3_flag(tp, 57765_PLUS))
3505                         tw32(NVRAM_ADDR, phy_addr);
3506
3507                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3508                     !tg3_flag(tp, 5755_PLUS) &&
3509                     (tp->nvram_jedecnum == JEDEC_ST) &&
3510                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3511                         u32 cmd;
3512
3513                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3514                         ret = tg3_nvram_exec_cmd(tp, cmd);
3515                         if (ret)
3516                                 break;
3517                 }
3518                 if (!tg3_flag(tp, FLASH)) {
3519                         /* We always do complete word writes to eeprom. */
3520                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3521                 }
3522
3523                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3524                 if (ret)
3525                         break;
3526         }
3527         return ret;
3528 }
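
/* Example of the FIRST/LAST framing above (illustrative numbers,
 * 264-byte buffered pages): a 12-byte write starting on a page
 * boundary issues three word writes; the first carries
 * NVRAM_CMD_FIRST (i == 0 and page_off == 0), the second neither
 * flag, and the third NVRAM_CMD_LAST because i == len - 4.  A word
 * landing at byte pagesize - 4 of a page picks up NVRAM_CMD_LAST
 * from the page_off test instead.
 */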
3529
3530 /* offset and length are dword aligned */
3531 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3532 {
3533         int ret;
3534
3535         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3536                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3537                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3538                 udelay(40);
3539         }
3540
3541         if (!tg3_flag(tp, NVRAM)) {
3542                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3543         } else {
3544                 u32 grc_mode;
3545
3546                 ret = tg3_nvram_lock(tp);
3547                 if (ret)
3548                         return ret;
3549
3550                 tg3_enable_nvram_access(tp);
3551                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3552                         tw32(NVRAM_WRITE1, 0x406);
3553
3554                 grc_mode = tr32(GRC_MODE);
3555                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3556
3557                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3558                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3559                                 buf);
3560                 } else {
3561                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3562                                 buf);
3563                 }
3564
3565                 grc_mode = tr32(GRC_MODE);
3566                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3567
3568                 tg3_disable_nvram_access(tp);
3569                 tg3_nvram_unlock(tp);
3570         }
3571
3572         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3573                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3574                 udelay(40);
3575         }
3576
3577         return ret;
3578 }
3579
3580 #define RX_CPU_SCRATCH_BASE     0x30000
3581 #define RX_CPU_SCRATCH_SIZE     0x04000
3582 #define TX_CPU_SCRATCH_BASE     0x34000
3583 #define TX_CPU_SCRATCH_SIZE     0x04000
3584
3585 /* tp->lock is held. */
3586 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3587 {
3588         int i;
3589         const int iters = 10000;
3590
3591         for (i = 0; i < iters; i++) {
3592                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3593                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3594                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3595                         break;
3596                 if (pci_channel_offline(tp->pdev))
3597                         return -EBUSY;
3598         }
3599
3600         return (i == iters) ? -EBUSY : 0;
3601 }
3602
3603 /* tp->lock is held. */
3604 static int tg3_rxcpu_pause(struct tg3 *tp)
3605 {
3606         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3607
3608         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3609         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3610         udelay(10);
3611
3612         return rc;
3613 }
3614
3615 /* tp->lock is held. */
3616 static int tg3_txcpu_pause(struct tg3 *tp)
3617 {
3618         return tg3_pause_cpu(tp, TX_CPU_BASE);
3619 }
3620
3621 /* tp->lock is held. */
3622 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3623 {
3624         tw32(cpu_base + CPU_STATE, 0xffffffff);
3625         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3626 }
3627
3628 /* tp->lock is held. */
3629 static void tg3_rxcpu_resume(struct tg3 *tp)
3630 {
3631         tg3_resume_cpu(tp, RX_CPU_BASE);
3632 }
3633
3634 /* tp->lock is held. */
3635 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3636 {
3637         int rc;
3638
3639         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3640
3641         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3642                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3643
3644                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3645                 return 0;
3646         }
3647         if (cpu_base == RX_CPU_BASE) {
3648                 rc = tg3_rxcpu_pause(tp);
3649         } else {
3650                 /*
3651                  * There is only an Rx CPU for the 5750 derivative in the
3652                  * BCM4785.
3653                  */
3654                 if (tg3_flag(tp, IS_SSB_CORE))
3655                         return 0;
3656
3657                 rc = tg3_txcpu_pause(tp);
3658         }
3659
3660         if (rc) {
3661                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3662                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3663                 return -ENODEV;
3664         }
3665
3666         /* Clear firmware's nvram arbitration. */
3667         if (tg3_flag(tp, NVRAM))
3668                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3669         return 0;
3670 }
3671
3672 static int tg3_fw_data_len(struct tg3 *tp,
3673                            const struct tg3_firmware_hdr *fw_hdr)
3674 {
3675         int fw_len;
3676
3677         /* Non-fragmented firmware has one firmware header followed by a
3678          * contiguous chunk of data to be written. The length field in that
3679          * header is not the length of the data to be written but the
3680          * complete length of the bss. The data length is therefore derived
3681          * from tp->fw->size minus the headers.
3682          *
3683          * Fragmented firmware has a main header followed by multiple
3684          * fragments. Each fragment is identical to non-fragmented firmware,
3685          * with a firmware header followed by a contiguous chunk of data. In
3686          * the main header, the length field is unused and set to 0xffffffff.
3687          * In each fragment header the length is the entire size of that
3688          * fragment, i.e. fragment data plus header length. The data length
3689          * is therefore the header's length field minus TG3_FW_HDR_LEN.
3690          */
3691         if (tp->fw_len == 0xffffffff)
3692                 fw_len = be32_to_cpu(fw_hdr->len);
3693         else
3694                 fw_len = tp->fw->size;
3695
3696         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3697 }
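
/* A minimal sketch, not used by the driver, of walking a fragmented
 * image as described above: skip the main header, then step by each
 * fragment's len field, which covers that fragment's header plus
 * data.  It mirrors the loop in tg3_load_firmware_cpu() below.
 */
static int __maybe_unused tg3_fw_count_fragments(struct tg3 *tp)
{
        const struct tg3_firmware_hdr *fw_hdr =
                (const struct tg3_firmware_hdr *)tp->fw->data;
        int total_len = tp->fw->size - TG3_FW_HDR_LEN;  /* main header */
        int nfrags = 0;

        fw_hdr++;                                       /* first fragment */
        while (total_len > 0) {
                nfrags++;
                total_len -= be32_to_cpu(fw_hdr->len);
                fw_hdr = (const struct tg3_firmware_hdr *)
                         ((const void *)fw_hdr + be32_to_cpu(fw_hdr->len));
        }

        return nfrags;
}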
3698
3699 /* tp->lock is held. */
3700 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3701                                  u32 cpu_scratch_base, int cpu_scratch_size,
3702                                  const struct tg3_firmware_hdr *fw_hdr)
3703 {
3704         int err, i;
3705         void (*write_op)(struct tg3 *, u32, u32);
3706         int total_len = tp->fw->size;
3707
3708         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3709                 netdev_err(tp->dev,
3710                            "%s: Trying to load TX cpu firmware on a 5705-class device\n",
3711                            __func__);
3712                 return -EINVAL;
3713         }
3714
3715         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3716                 write_op = tg3_write_mem;
3717         else
3718                 write_op = tg3_write_indirect_reg32;
3719
3720         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3721                 /* It is possible that bootcode is still loading at this point.
3722                  * Get the nvram lock before halting the cpu.
3723                  */
3724                 int lock_err = tg3_nvram_lock(tp);
3725                 err = tg3_halt_cpu(tp, cpu_base);
3726                 if (!lock_err)
3727                         tg3_nvram_unlock(tp);
3728                 if (err)
3729                         goto out;
3730
3731                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3732                         write_op(tp, cpu_scratch_base + i, 0);
3733                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3734                 tw32(cpu_base + CPU_MODE,
3735                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3736         } else {
3737                 /* Subtract additional main header for fragmented firmware and
3738                  * advance to the first fragment.
3739                  */
3740                 total_len -= TG3_FW_HDR_LEN;
3741                 fw_hdr++;
3742         }
3743
3744         do {
3745                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3746                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3747                         write_op(tp, cpu_scratch_base +
3748                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3749                                      (i * sizeof(u32)),
3750                                  be32_to_cpu(fw_data[i]));
3751
3752                 total_len -= be32_to_cpu(fw_hdr->len);
3753
3754                 /* Advance to next fragment */
3755                 fw_hdr = (struct tg3_firmware_hdr *)
3756                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3757         } while (total_len > 0);
3758
3759         err = 0;
3760
3761 out:
3762         return err;
3763 }
3764
3765 /* tp->lock is held. */
3766 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3767 {
3768         int i;
3769         const int iters = 5;
3770
3771         tw32(cpu_base + CPU_STATE, 0xffffffff);
3772         tw32_f(cpu_base + CPU_PC, pc);
3773
3774         for (i = 0; i < iters; i++) {
3775                 if (tr32(cpu_base + CPU_PC) == pc)
3776                         break;
3777                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3778                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3779                 tw32_f(cpu_base + CPU_PC, pc);
3780                 udelay(1000);
3781         }
3782
3783         return (i == iters) ? -EBUSY : 0;
3784 }
3785
3786 /* tp->lock is held. */
3787 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3788 {
3789         const struct tg3_firmware_hdr *fw_hdr;
3790         int err;
3791
3792         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3793
3794         /* The firmware blob starts with version numbers, followed by
3795          * the start address and length. The length field holds the
3796          * complete length: end_address_of_bss - start_address_of_text.
3797          * The remainder is the blob, to be loaded contiguously from
3798          * the start address. */
3799
3800         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3801                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3802                                     fw_hdr);
3803         if (err)
3804                 return err;
3805
3806         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3807                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3808                                     fw_hdr);
3809         if (err)
3810                 return err;
3811
3812         /* Now startup only the RX cpu. */
3813         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3814                                        be32_to_cpu(fw_hdr->base_addr));
3815         if (err) {
3816                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x, "
3817                            "should be %08x\n", __func__,
3818                            tr32(RX_CPU_BASE + CPU_PC),
3819                            be32_to_cpu(fw_hdr->base_addr));
3820                 return -ENODEV;
3821         }
3822
3823         tg3_rxcpu_resume(tp);
3824
3825         return 0;
3826 }
3827
3828 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3829 {
3830         const int iters = 1000;
3831         int i;
3832         u32 val;
3833
3834         /* Wait for boot code to complete initialization and enter service
3835          * loop. It is then safe to download service patches.
3836          */
3837         for (i = 0; i < iters; i++) {
3838                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3839                         break;
3840
3841                 udelay(10);
3842         }
3843
3844         if (i == iters) {
3845                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3846                 return -EBUSY;
3847         }
3848
3849         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3850         if (val & 0xff) {
3851                 netdev_warn(tp->dev,
3852                             "Other patches exist. Not downloading EEE patch\n");
3853                 return -EEXIST;
3854         }
3855
3856         return 0;
3857 }
3858
3859 /* tp->lock is held. */
3860 static void tg3_load_57766_firmware(struct tg3 *tp)
3861 {
3862         struct tg3_firmware_hdr *fw_hdr;
3863
3864         if (!tg3_flag(tp, NO_NVRAM))
3865                 return;
3866
3867         if (tg3_validate_rxcpu_state(tp))
3868                 return;
3869
3870         if (!tp->fw)
3871                 return;
3872
3873         /* This firmware blob has a different format from older firmware
3874          * releases, as described below. The main difference is that the
3875          * data is fragmented and written to non-contiguous locations.
3876          *
3877          * The blob begins with a firmware header identical to other
3878          * firmware, consisting of version, base addr and length. The
3879          * length here is unused and set to 0xffffffff.
3880          *
3881          * This is followed by a series of firmware fragments, each
3882          * individually identical to older firmware: a firmware header
3883          * followed by the data for that fragment. The version field of
3884          * each fragment header is unused.
3885          */
3886
3887         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3888         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3889                 return;
3890
3891         if (tg3_rxcpu_pause(tp))
3892                 return;
3893
3894         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3895         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3896
3897         tg3_rxcpu_resume(tp);
3898 }
3899
3900 /* tp->lock is held. */
3901 static int tg3_load_tso_firmware(struct tg3 *tp)
3902 {
3903         const struct tg3_firmware_hdr *fw_hdr;
3904         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3905         int err;
3906
3907         if (!tg3_flag(tp, FW_TSO))
3908                 return 0;
3909
3910         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3911
3912         /* The firmware blob starts with version numbers, followed by
3913          * the start address and length. The length field holds the
3914          * complete length: end_address_of_bss - start_address_of_text.
3915          * The remainder is the blob, to be loaded contiguously from
3916          * the start address. */
3917
3918         cpu_scratch_size = tp->fw_len;
3919
3920         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3921                 cpu_base = RX_CPU_BASE;
3922                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3923         } else {
3924                 cpu_base = TX_CPU_BASE;
3925                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3926                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3927         }
3928
3929         err = tg3_load_firmware_cpu(tp, cpu_base,
3930                                     cpu_scratch_base, cpu_scratch_size,
3931                                     fw_hdr);
3932         if (err)
3933                 return err;
3934
3935         /* Now startup the cpu. */
3936         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3937                                        be32_to_cpu(fw_hdr->base_addr));
3938         if (err) {
3939                 netdev_err(tp->dev,
3940                            "%s failed to set CPU PC, is %08x, should be %08x\n",
3941                            __func__, tr32(cpu_base + CPU_PC),
3942                            be32_to_cpu(fw_hdr->base_addr));
3943                 return -ENODEV;
3944         }
3945
3946         tg3_resume_cpu(tp, cpu_base);
3947         return 0;
3948 }
3949
3950 /* tp->lock is held. */
3951 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3952 {
3953         u32 addr_high, addr_low;
3954
3955         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3956         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3957                     (mac_addr[4] <<  8) | mac_addr[5]);
3958
3959         if (index < 4) {
3960                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3961                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3962         } else {
3963                 index -= 4;
3964                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3965                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3966         }
3967 }
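
/* Worked example of the packing above (illustrative address): for
 * MAC 00:10:18:aa:bb:cc, addr_high is 0x0010 and addr_low is
 * 0x18aabbcc, i.e. the two most significant octets go to the HIGH
 * register and the remaining four to the LOW register.
 */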
3968
3969 /* tp->lock is held. */
3970 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3971 {
3972         u32 addr_high;
3973         int i;
3974
3975         for (i = 0; i < 4; i++) {
3976                 if (i == 1 && skip_mac_1)
3977                         continue;
3978                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3979         }
3980
3981         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3982             tg3_asic_rev(tp) == ASIC_REV_5704) {
3983                 for (i = 4; i < 16; i++)
3984                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3985         }
3986
3987         addr_high = (tp->dev->dev_addr[0] +
3988                      tp->dev->dev_addr[1] +
3989                      tp->dev->dev_addr[2] +
3990                      tp->dev->dev_addr[3] +
3991                      tp->dev->dev_addr[4] +
3992                      tp->dev->dev_addr[5]) &
3993                 TX_BACKOFF_SEED_MASK;
3994         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3995 }
3996
3997 static void tg3_enable_register_access(struct tg3 *tp)
3998 {
3999         /*
4000          * Make sure register accesses (indirect or otherwise) will function
4001          * correctly.
4002          */
4003         pci_write_config_dword(tp->pdev,
4004                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4005 }
4006
4007 static int tg3_power_up(struct tg3 *tp)
4008 {
4009         int err;
4010
4011         tg3_enable_register_access(tp);
4012
4013         err = pci_set_power_state(tp->pdev, PCI_D0);
4014         if (!err) {
4015                 /* Switch out of Vaux if it is a NIC */
4016                 tg3_pwrsrc_switch_to_vmain(tp);
4017         } else {
4018                 netdev_err(tp->dev, "Transition to D0 failed\n");
4019         }
4020
4021         return err;
4022 }
4023
4024 static int tg3_setup_phy(struct tg3 *, bool);
4025
4026 static int tg3_power_down_prepare(struct tg3 *tp)
4027 {
4028         u32 misc_host_ctrl;
4029         bool device_should_wake, do_low_power;
4030
4031         tg3_enable_register_access(tp);
4032
4033         /* Restore the CLKREQ setting. */
4034         if (tg3_flag(tp, CLKREQ_BUG))
4035                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4036                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4037
4038         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4039         tw32(TG3PCI_MISC_HOST_CTRL,
4040              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4041
4042         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4043                              tg3_flag(tp, WOL_ENABLE);
4044
4045         if (tg3_flag(tp, USE_PHYLIB)) {
4046                 do_low_power = false;
4047                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4048                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4049                         struct phy_device *phydev;
4050                         u32 phyid, advertising;
4051
4052                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4053
4054                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4055
4056                         tp->link_config.speed = phydev->speed;
4057                         tp->link_config.duplex = phydev->duplex;
4058                         tp->link_config.autoneg = phydev->autoneg;
4059                         tp->link_config.advertising = phydev->advertising;
4060
4061                         advertising = ADVERTISED_TP |
4062                                       ADVERTISED_Pause |
4063                                       ADVERTISED_Autoneg |
4064                                       ADVERTISED_10baseT_Half;
4065
4066                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4067                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4068                                         advertising |=
4069                                                 ADVERTISED_100baseT_Half |
4070                                                 ADVERTISED_100baseT_Full |
4071                                                 ADVERTISED_10baseT_Full;
4072                                 else
4073                                         advertising |= ADVERTISED_10baseT_Full;
4074                         }
4075
4076                         phydev->advertising = advertising;
4077
4078                         phy_start_aneg(phydev);
4079
4080                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4081                         if (phyid != PHY_ID_BCMAC131) {
4082                                 phyid &= PHY_BCM_OUI_MASK;
4083                                 if (phyid == PHY_BCM_OUI_1 ||
4084                                     phyid == PHY_BCM_OUI_2 ||
4085                                     phyid == PHY_BCM_OUI_3)
4086                                         do_low_power = true;
4087                         }
4088                 }
4089         } else {
4090                 do_low_power = true;
4091
4092                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4093                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4094
4095                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4096                         tg3_setup_phy(tp, false);
4097         }
4098
4099         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4100                 u32 val;
4101
4102                 val = tr32(GRC_VCPU_EXT_CTRL);
4103                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4104         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4105                 int i;
4106                 u32 val;
4107
4108                 for (i = 0; i < 200; i++) {
4109                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4110                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4111                                 break;
4112                         msleep(1);
4113                 }
4114         }
4115         if (tg3_flag(tp, WOL_CAP))
4116                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4117                                                      WOL_DRV_STATE_SHUTDOWN |
4118                                                      WOL_DRV_WOL |
4119                                                      WOL_SET_MAGIC_PKT);
4120
4121         if (device_should_wake) {
4122                 u32 mac_mode;
4123
4124                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4125                         if (do_low_power &&
4126                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4127                                 tg3_phy_auxctl_write(tp,
4128                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4129                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4130                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4131                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4132                                 udelay(40);
4133                         }
4134
4135                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4136                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4137                         else if (tp->phy_flags &
4138                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4139                                 if (tp->link_config.active_speed == SPEED_1000)
4140                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4141                                 else
4142                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4143                         } else
4144                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4145
4146                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4147                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4148                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4149                                              SPEED_100 : SPEED_10;
4150                                 if (tg3_5700_link_polarity(tp, speed))
4151                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4152                                 else
4153                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4154                         }
4155                 } else {
4156                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4157                 }
4158
4159                 if (!tg3_flag(tp, 5750_PLUS))
4160                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4161
4162                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4163                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4164                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4165                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4166
4167                 if (tg3_flag(tp, ENABLE_APE))
4168                         mac_mode |= MAC_MODE_APE_TX_EN |
4169                                     MAC_MODE_APE_RX_EN |
4170                                     MAC_MODE_TDE_ENABLE;
4171
4172                 tw32_f(MAC_MODE, mac_mode);
4173                 udelay(100);
4174
4175                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4176                 udelay(10);
4177         }
4178
4179         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4180             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4181              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4182                 u32 base_val;
4183
4184                 base_val = tp->pci_clock_ctrl;
4185                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4186                              CLOCK_CTRL_TXCLK_DISABLE);
4187
4188                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4189                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4190         } else if (tg3_flag(tp, 5780_CLASS) ||
4191                    tg3_flag(tp, CPMU_PRESENT) ||
4192                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4193                 /* do nothing */
4194         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4195                 u32 newbits1, newbits2;
4196
4197                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4198                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4199                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4200                                     CLOCK_CTRL_TXCLK_DISABLE |
4201                                     CLOCK_CTRL_ALTCLK);
4202                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4203                 } else if (tg3_flag(tp, 5705_PLUS)) {
4204                         newbits1 = CLOCK_CTRL_625_CORE;
4205                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4206                 } else {
4207                         newbits1 = CLOCK_CTRL_ALTCLK;
4208                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209                 }
4210
4211                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4212                             40);
4213
4214                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4215                             40);
4216
4217                 if (!tg3_flag(tp, 5705_PLUS)) {
4218                         u32 newbits3;
4219
4220                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4221                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4222                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4223                                             CLOCK_CTRL_TXCLK_DISABLE |
4224                                             CLOCK_CTRL_44MHZ_CORE);
4225                         } else {
4226                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4227                         }
4228
4229                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4230                                     tp->pci_clock_ctrl | newbits3, 40);
4231                 }
4232         }
4233
4234         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4235                 tg3_power_down_phy(tp, do_low_power);
4236
4237         tg3_frob_aux_power(tp, true);
4238
4239         /* Workaround for unstable PLL clock */
4240         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4241             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4242              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4243                 u32 val = tr32(0x7d00);
4244
4245                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4246                 tw32(0x7d00, val);
4247                 if (!tg3_flag(tp, ENABLE_ASF)) {
4248                         int err;
4249
4250                         err = tg3_nvram_lock(tp);
4251                         tg3_halt_cpu(tp, RX_CPU_BASE);
4252                         if (!err)
4253                                 tg3_nvram_unlock(tp);
4254                 }
4255         }
4256
4257         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4258
4259         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4260
4261         return 0;
4262 }
4263
4264 static void tg3_power_down(struct tg3 *tp)
4265 {
4266         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4267         pci_set_power_state(tp->pdev, PCI_D3hot);
4268 }
4269
4270 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4271 {
4272         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4273         case MII_TG3_AUX_STAT_10HALF:
4274                 *speed = SPEED_10;
4275                 *duplex = DUPLEX_HALF;
4276                 break;
4277
4278         case MII_TG3_AUX_STAT_10FULL:
4279                 *speed = SPEED_10;
4280                 *duplex = DUPLEX_FULL;
4281                 break;
4282
4283         case MII_TG3_AUX_STAT_100HALF:
4284                 *speed = SPEED_100;
4285                 *duplex = DUPLEX_HALF;
4286                 break;
4287
4288         case MII_TG3_AUX_STAT_100FULL:
4289                 *speed = SPEED_100;
4290                 *duplex = DUPLEX_FULL;
4291                 break;
4292
4293         case MII_TG3_AUX_STAT_1000HALF:
4294                 *speed = SPEED_1000;
4295                 *duplex = DUPLEX_HALF;
4296                 break;
4297
4298         case MII_TG3_AUX_STAT_1000FULL:
4299                 *speed = SPEED_1000;
4300                 *duplex = DUPLEX_FULL;
4301                 break;
4302
4303         default:
4304                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4305                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4306                                  SPEED_10;
4307                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4308                                   DUPLEX_HALF;
4309                         break;
4310                 }
4311                 *speed = SPEED_UNKNOWN;
4312                 *duplex = DUPLEX_UNKNOWN;
4313                 break;
4314         }
4315 }
4316
4317 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4318 {
4319         int err = 0;
4320         u32 val, new_adv;
4321
4322         new_adv = ADVERTISE_CSMA;
4323         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4324         new_adv |= mii_advertise_flowctrl(flowctrl);
4325
4326         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4327         if (err)
4328                 goto done;
4329
4330         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4331                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4332
4333                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4334                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4335                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4336
4337                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4338                 if (err)
4339                         goto done;
4340         }
4341
4342         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4343                 goto done;
4344
4345         tw32(TG3_CPMU_EEE_MODE,
4346              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4347
4348         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4349         if (!err) {
4350                 u32 err2;
4351
4352                 val = 0;
4353                 /* Advertise 100-BaseTX EEE ability */
4354                 if (advertise & ADVERTISED_100baseT_Full)
4355                         val |= MDIO_AN_EEE_ADV_100TX;
4356                 /* Advertise 1000-BaseT EEE ability */
4357                 if (advertise & ADVERTISED_1000baseT_Full)
4358                         val |= MDIO_AN_EEE_ADV_1000T;
4359
4360                 if (!tp->eee.eee_enabled) {
4361                         val = 0;
4362                         tp->eee.advertised = 0;
4363                 } else {
4364                         tp->eee.advertised = advertise &
4365                                              (ADVERTISED_100baseT_Full |
4366                                               ADVERTISED_1000baseT_Full);
4367                 }
4368
4369                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4370                 if (err)
4371                         val = 0;
4372
4373                 switch (tg3_asic_rev(tp)) {
4374                 case ASIC_REV_5717:
4375                 case ASIC_REV_57765:
4376                 case ASIC_REV_57766:
4377                 case ASIC_REV_5719:
4378                         /* If any EEE modes were advertised above... */
4379                         if (val)
4380                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4381                                       MII_TG3_DSP_TAP26_RMRXSTO |
4382                                       MII_TG3_DSP_TAP26_OPCSINPT;
4383                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4384                         /* Fall through */
4385                 case ASIC_REV_5720:
4386                 case ASIC_REV_5762:
4387                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4388                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4389                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4390                 }
4391
4392                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4393                 if (!err)
4394                         err = err2;
4395         }
4396
4397 done:
4398         return err;
4399 }
4400
4401 static void tg3_phy_copper_begin(struct tg3 *tp)
4402 {
4403         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4404             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4405                 u32 adv, fc;
4406
4407                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4408                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4409                         adv = ADVERTISED_10baseT_Half |
4410                               ADVERTISED_10baseT_Full;
4411                         if (tg3_flag(tp, WOL_SPEED_100MB))
4412                                 adv |= ADVERTISED_100baseT_Half |
4413                                        ADVERTISED_100baseT_Full;
4414                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4415                                 if (!(tp->phy_flags &
4416                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4417                                         adv |= ADVERTISED_1000baseT_Half;
4418                                 adv |= ADVERTISED_1000baseT_Full;
4419                         }
4420
4421                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4422                 } else {
4423                         adv = tp->link_config.advertising;
4424                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4425                                 adv &= ~(ADVERTISED_1000baseT_Half |
4426                                          ADVERTISED_1000baseT_Full);
4427
4428                         fc = tp->link_config.flowctrl;
4429                 }
4430
4431                 tg3_phy_autoneg_cfg(tp, adv, fc);
4432
4433                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4434                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4435                         /* Normally during power down we want to autonegotiate
4436                          * the lowest possible speed for WOL. However, to avoid
4437                          * a link flap we leave the configuration untouched.
4438                          */
4439                         return;
4440                 }
4441
4442                 tg3_writephy(tp, MII_BMCR,
4443                              BMCR_ANENABLE | BMCR_ANRESTART);
4444         } else {
4445                 int i;
4446                 u32 bmcr, orig_bmcr;
4447
4448                 tp->link_config.active_speed = tp->link_config.speed;
4449                 tp->link_config.active_duplex = tp->link_config.duplex;
4450
4451                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4452                         /* With autoneg disabled, 5715 only links up when the
4453                          * advertisement register has the configured speed
4454                          * enabled.
4455                          */
4456                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4457                 }
4458
4459                 bmcr = 0;
4460                 switch (tp->link_config.speed) {
4461                 default:
4462                 case SPEED_10:
4463                         break;
4464
4465                 case SPEED_100:
4466                         bmcr |= BMCR_SPEED100;
4467                         break;
4468
4469                 case SPEED_1000:
4470                         bmcr |= BMCR_SPEED1000;
4471                         break;
4472                 }
4473
4474                 if (tp->link_config.duplex == DUPLEX_FULL)
4475                         bmcr |= BMCR_FULLDPLX;
4476
4477                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4478                     (bmcr != orig_bmcr)) {
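                              /* Drop the link by putting the PHY in loopback,
                               * poll BMSR (up to 1500 x 10 us, ~15 ms) until
                               * link-down is seen, then write the new
                               * forced-speed BMCR.
                               */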
4479                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4480                         for (i = 0; i < 1500; i++) {
4481                                 u32 tmp;
4482
4483                                 udelay(10);
4484                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4485                                     tg3_readphy(tp, MII_BMSR, &tmp))
4486                                         continue;
4487                                 if (!(tmp & BMSR_LSTATUS)) {
4488                                         udelay(40);
4489                                         break;
4490                                 }
4491                         }
4492                         tg3_writephy(tp, MII_BMCR, bmcr);
4493                         udelay(40);
4494                 }
4495         }
4496 }
4497
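     /* Read the PHY's current autoneg/speed/duplex settings back into
      * tp->link_config, presumably so a configuration left behind by
      * firmware or boot code can be preserved rather than overwritten.
      */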
4498 static int tg3_phy_pull_config(struct tg3 *tp)
4499 {
4500         int err;
4501         u32 val;
4502
4503         err = tg3_readphy(tp, MII_BMCR, &val);
4504         if (err)
4505                 goto done;
4506
4507         if (!(val & BMCR_ANENABLE)) {
4508                 tp->link_config.autoneg = AUTONEG_DISABLE;
4509                 tp->link_config.advertising = 0;
4510                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4511
4512                 err = -EIO;
4513
4514                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4515                 case 0:
4516                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4517                                 goto done;
4518
4519                         tp->link_config.speed = SPEED_10;
4520                         break;
4521                 case BMCR_SPEED100:
4522                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4523                                 goto done;
4524
4525                         tp->link_config.speed = SPEED_100;
4526                         break;
4527                 case BMCR_SPEED1000:
4528                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4529                                 tp->link_config.speed = SPEED_1000;
4530                                 break;
4531                         }
4532                         /* Fall through */
4533                 default:
4534                         goto done;
4535                 }
4536
4537                 if (val & BMCR_FULLDPLX)
4538                         tp->link_config.duplex = DUPLEX_FULL;
4539                 else
4540                         tp->link_config.duplex = DUPLEX_HALF;
4541
4542                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4543
4544                 err = 0;
4545                 goto done;
4546         }
4547
4548         tp->link_config.autoneg = AUTONEG_ENABLE;
4549         tp->link_config.advertising = ADVERTISED_Autoneg;
4550         tg3_flag_set(tp, PAUSE_AUTONEG);
4551
4552         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4553                 u32 adv;
4554
4555                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4556                 if (err)
4557                         goto done;
4558
4559                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4560                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4561
4562                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4563         } else {
4564                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4565         }
4566
4567         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4568                 u32 adv;
4569
4570                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4571                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4572                         if (err)
4573                                 goto done;
4574
4575                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4576                 } else {
4577                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4578                         if (err)
4579                                 goto done;
4580
4581                         adv = tg3_decode_flowctrl_1000X(val);
4582                         tp->link_config.flowctrl = adv;
4583
4584                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4585                         adv = mii_adv_to_ethtool_adv_x(val);
4586                 }
4587
4588                 tp->link_config.advertising |= adv;
4589         }
4590
4591 done:
4592         return err;
4593 }
4594
4595 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4596 {
4597         int err;
4598
4599         /* Turn off tap power management and set the
4600          * Extended packet length bit. */
4601         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4602
4603         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4604         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4605         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4606         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4607         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4608
4609         udelay(40);
4610
4611         return err;
4612 }
4613
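     /* Check whether the EEE advertisement and LPI settings in the PHY
      * still match what is cached in tp->eee; a mismatch means the EEE
      * configuration must be reapplied, which requires a PHY reset.
      */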
4614 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4615 {
4616         struct ethtool_eee eee;
4617
4618         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4619                 return true;
4620
4621         tg3_eee_pull_config(tp, &eee);
4622
4623         if (tp->eee.eee_enabled) {
4624                 if (tp->eee.advertised != eee.advertised ||
4625                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4626                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4627                         return false;
4628         } else {
4629                 /* EEE is disabled but we're advertising */
4630                 if (eee.advertised)
4631                         return false;
4632         }
4633
4634         return true;
4635 }
4636
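     /* Verify that MII_ADVERTISE/MII_CTRL1000 still contain exactly what
      * we intended to advertise; if not, autoneg ran with stale values
      * and the caller will renegotiate.
      */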
4637 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4638 {
4639         u32 advmsk, tgtadv, advertising;
4640
4641         advertising = tp->link_config.advertising;
4642         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4643
4644         advmsk = ADVERTISE_ALL;
4645         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4646                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4647                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4648         }
4649
4650         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4651                 return false;
4652
4653         if ((*lcladv & advmsk) != tgtadv)
4654                 return false;
4655
4656         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4657                 u32 tg3_ctrl;
4658
4659                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4660
4661                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4662                         return false;
4663
4664                 if (tgtadv &&
4665                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4666                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4667                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4668                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4669                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4670                 } else {
4671                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4672                 }
4673
4674                 if (tg3_ctrl != tgtadv)
4675                         return false;
4676         }
4677
4678         return true;
4679 }
4680
4681 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4682 {
4683         u32 lpeth = 0;
4684
4685         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4686                 u32 val;
4687
4688                 if (tg3_readphy(tp, MII_STAT1000, &val))
4689                         return false;
4690
4691                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4692         }
4693
4694         if (tg3_readphy(tp, MII_LPA, rmtadv))
4695                 return false;
4696
4697         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4698         tp->link_config.rmt_adv = lpeth;
4699
4700         return true;
4701 }
4702
4703 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4704 {
4705         if (curr_link_up != tp->link_up) {
4706                 if (curr_link_up) {
4707                         netif_carrier_on(tp->dev);
4708                 } else {
4709                         netif_carrier_off(tp->dev);
4710                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4711                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4712                 }
4713
4714                 tg3_link_report(tp);
4715                 return true;
4716         }
4717
4718         return false;
4719 }
4720
4721 static void tg3_clear_mac_status(struct tg3 *tp)
4722 {
4723         tw32(MAC_EVENT, 0);
4724
4725         tw32_f(MAC_STATUS,
4726                MAC_STATUS_SYNC_CHANGED |
4727                MAC_STATUS_CFG_CHANGED |
4728                MAC_STATUS_MI_COMPLETION |
4729                MAC_STATUS_LNKSTATE_CHANGED);
4730         udelay(40);
4731 }
4732
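     /* Program the CPMU EEE engine from tp->eee: link-idle detection
      * sources, LPI exit latency, LPI entry conditions, and the two
      * debounce timers.
      */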
4733 static void tg3_setup_eee(struct tg3 *tp)
4734 {
4735         u32 val;
4736
4737         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4738               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4739         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4740                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4741
4742         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4743
4744         tw32_f(TG3_CPMU_EEE_CTRL,
4745                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4746
4747         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4748               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4749               TG3_CPMU_EEEMD_LPI_IN_RX |
4750               TG3_CPMU_EEEMD_EEE_ENABLE;
4751
4752         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4753                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4754
4755         if (tg3_flag(tp, ENABLE_APE))
4756                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4757
4758         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4759
4760         tw32_f(TG3_CPMU_EEE_DBTMR1,
4761                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4762                (tp->eee.tx_lpi_timer & 0xffff));
4763
4764         tw32_f(TG3_CPMU_EEE_DBTMR2,
4765                TG3_CPMU_DBTMR2_APE_TX_2047US |
4766                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4767 }
4768
4769 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4770 {
4771         bool current_link_up;
4772         u32 bmsr, val;
4773         u32 lcl_adv, rmt_adv;
4774         u16 current_speed;
4775         u8 current_duplex;
4776         int i, err;
4777
4778         tg3_clear_mac_status(tp);
4779
4780         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4781                 tw32_f(MAC_MI_MODE,
4782                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4783                 udelay(80);
4784         }
4785
4786         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4787
4788         /* Some third-party PHYs need to be reset when the link
4789          * goes down.
4790          */
4791         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4792              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4793              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4794             tp->link_up) {
4795                 tg3_readphy(tp, MII_BMSR, &bmsr);
4796                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4797                     !(bmsr & BMSR_LSTATUS))
4798                         force_reset = true;
4799         }
4800         if (force_reset)
4801                 tg3_phy_reset(tp);
4802
4803         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4804                 tg3_readphy(tp, MII_BMSR, &bmsr);
4805                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4806                     !tg3_flag(tp, INIT_COMPLETE))
4807                         bmsr = 0;
4808
4809                 if (!(bmsr & BMSR_LSTATUS)) {
4810                         err = tg3_init_5401phy_dsp(tp);
4811                         if (err)
4812                                 return err;
4813
4814                         tg3_readphy(tp, MII_BMSR, &bmsr);
4815                         for (i = 0; i < 1000; i++) {
4816                                 udelay(10);
4817                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4818                                     (bmsr & BMSR_LSTATUS)) {
4819                                         udelay(40);
4820                                         break;
4821                                 }
4822                         }
4823
4824                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4825                             TG3_PHY_REV_BCM5401_B0 &&
4826                             !(bmsr & BMSR_LSTATUS) &&
4827                             tp->link_config.active_speed == SPEED_1000) {
4828                                 err = tg3_phy_reset(tp);
4829                                 if (!err)
4830                                         err = tg3_init_5401phy_dsp(tp);
4831                                 if (err)
4832                                         return err;
4833                         }
4834                 }
4835         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4836                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4837                 /* 5701 {A0,B0} CRC bug workaround */
4838                 tg3_writephy(tp, 0x15, 0x0a75);
4839                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4840                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4841                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4842         }
4843
4844         /* Clear pending interrupts... */
4845         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4846         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4847
4848         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4849                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4850         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4851                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4852
4853         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4854             tg3_asic_rev(tp) == ASIC_REV_5701) {
4855                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4856                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4857                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4858                 else
4859                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4860         }
4861
4862         current_link_up = false;
4863         current_speed = SPEED_UNKNOWN;
4864         current_duplex = DUPLEX_UNKNOWN;
4865         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4866         tp->link_config.rmt_adv = 0;
4867
4868         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4869                 err = tg3_phy_auxctl_read(tp,
4870                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4871                                           &val);
4872                 if (!err && !(val & (1 << 10))) {
4873                         tg3_phy_auxctl_write(tp,
4874                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4875                                              val | (1 << 10));
4876                         goto relink;
4877                 }
4878         }
4879
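             /* BMSR_LSTATUS is latched low, so the first read may return
              * a stale link-down indication; read twice and trust the
              * second value.
              */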
4880         bmsr = 0;
4881         for (i = 0; i < 100; i++) {
4882                 tg3_readphy(tp, MII_BMSR, &bmsr);
4883                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4884                     (bmsr & BMSR_LSTATUS))
4885                         break;
4886                 udelay(40);
4887         }
4888
4889         if (bmsr & BMSR_LSTATUS) {
4890                 u32 aux_stat, bmcr;
4891
4892                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4893                 for (i = 0; i < 2000; i++) {
4894                         udelay(10);
4895                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4896                             aux_stat)
4897                                 break;
4898                 }
4899
4900                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4901                                              &current_speed,
4902                                              &current_duplex);
4903
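                     /* Poll until BMCR returns a plausible value; reads of
                      * 0 or 0x7fff are discarded as transient/invalid.
                      */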
4904                 bmcr = 0;
4905                 for (i = 0; i < 200; i++) {
4906                         tg3_readphy(tp, MII_BMCR, &bmcr);
4907                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4908                                 continue;
4909                         if (bmcr && bmcr != 0x7fff)
4910                                 break;
4911                         udelay(10);
4912                 }
4913
4914                 lcl_adv = 0;
4915                 rmt_adv = 0;
4916
4917                 tp->link_config.active_speed = current_speed;
4918                 tp->link_config.active_duplex = current_duplex;
4919
4920                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4921                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4922
4923                         if ((bmcr & BMCR_ANENABLE) &&
4924                             eee_config_ok &&
4925                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4926                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4927                                 current_link_up = true;
4928
4929                         /* EEE setting changes take effect only after a PHY
4930                          * reset.  If we have skipped a reset because Link
4931                          * Flap Avoidance is enabled, do it now.
4932                          */
4933                         if (!eee_config_ok &&
4934                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4935                             !force_reset) {
4936                                 tg3_setup_eee(tp);
4937                                 tg3_phy_reset(tp);
4938                         }
4939                 } else {
4940                         if (!(bmcr & BMCR_ANENABLE) &&
4941                             tp->link_config.speed == current_speed &&
4942                             tp->link_config.duplex == current_duplex) {
4943                                 current_link_up = true;
4944                         }
4945                 }
4946
4947                 if (current_link_up &&
4948                     tp->link_config.active_duplex == DUPLEX_FULL) {
4949                         u32 reg, bit;
4950
4951                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4952                                 reg = MII_TG3_FET_GEN_STAT;
4953                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4954                         } else {
4955                                 reg = MII_TG3_EXT_STAT;
4956                                 bit = MII_TG3_EXT_STAT_MDIX;
4957                         }
4958
4959                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4960                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4961
4962                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4963                 }
4964         }
4965
4966 relink:
4967         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4968                 tg3_phy_copper_begin(tp);
4969
4970                 if (tg3_flag(tp, ROBOSWITCH)) {
4971                         current_link_up = true;
4972                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4973                         current_speed = SPEED_1000;
4974                         current_duplex = DUPLEX_FULL;
4975                         tp->link_config.active_speed = current_speed;
4976                         tp->link_config.active_duplex = current_duplex;
4977                 }
4978
4979                 tg3_readphy(tp, MII_BMSR, &bmsr);
4980                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4981                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4982                         current_link_up = true;
4983         }
4984
4985         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4986         if (current_link_up) {
4987                 if (tp->link_config.active_speed == SPEED_100 ||
4988                     tp->link_config.active_speed == SPEED_10)
4989                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4990                 else
4991                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4992         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4993                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4994         else
4995                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4996
4997         /* In order for the 5750 core in BCM4785 chip to work properly
4998          * in RGMII mode, the Led Control Register must be set up.
4999          */
5000         if (tg3_flag(tp, RGMII_MODE)) {
5001                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5002                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5003
5004                 if (tp->link_config.active_speed == SPEED_10)
5005                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5006                 else if (tp->link_config.active_speed == SPEED_100)
5007                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5008                                      LED_CTRL_100MBPS_ON);
5009                 else if (tp->link_config.active_speed == SPEED_1000)
5010                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5011                                      LED_CTRL_1000MBPS_ON);
5012
5013                 tw32(MAC_LED_CTRL, led_ctrl);
5014                 udelay(40);
5015         }
5016
5017         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5018         if (tp->link_config.active_duplex == DUPLEX_HALF)
5019                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5020
5021         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5022                 if (current_link_up &&
5023                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5024                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5025                 else
5026                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5027         }
5028
5029         /* ??? Without this setting Netgear GA302T PHY does not
5030          * ??? send/receive packets...
5031          */
5032         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5033             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5034                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5035                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5036                 udelay(80);
5037         }
5038
5039         tw32_f(MAC_MODE, tp->mac_mode);
5040         udelay(40);
5041
5042         tg3_phy_eee_adjust(tp, current_link_up);
5043
5044         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5045                 /* Polled via timer. */
5046                 tw32_f(MAC_EVENT, 0);
5047         } else {
5048                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5049         }
5050         udelay(40);
5051
5052         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5053             current_link_up &&
5054             tp->link_config.active_speed == SPEED_1000 &&
5055             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5056                 udelay(120);
5057                 tw32_f(MAC_STATUS,
5058                      (MAC_STATUS_SYNC_CHANGED |
5059                       MAC_STATUS_CFG_CHANGED));
5060                 udelay(40);
5061                 tg3_write_mem(tp,
5062                               NIC_SRAM_FIRMWARE_MBOX,
5063                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5064         }
5065
5066         /* Prevent send BD corruption: keep CLKREQ off at 10/100 speeds. */
5067         if (tg3_flag(tp, CLKREQ_BUG)) {
5068                 if (tp->link_config.active_speed == SPEED_100 ||
5069                     tp->link_config.active_speed == SPEED_10)
5070                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5071                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5072                 else
5073                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5074                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5075         }
5076
5077         tg3_test_and_report_link_chg(tp, current_link_up);
5078
5079         return 0;
5080 }
5081
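     /* Software implementation of the 1000BASE-X autonegotiation
      * arbitration state machine of IEEE 802.3 Clause 37; the MR_*
      * flags below mirror the standard's mr_* management variables.
      */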
5082 struct tg3_fiber_aneginfo {
5083         int state;
5084 #define ANEG_STATE_UNKNOWN              0
5085 #define ANEG_STATE_AN_ENABLE            1
5086 #define ANEG_STATE_RESTART_INIT         2
5087 #define ANEG_STATE_RESTART              3
5088 #define ANEG_STATE_DISABLE_LINK_OK      4
5089 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5090 #define ANEG_STATE_ABILITY_DETECT       6
5091 #define ANEG_STATE_ACK_DETECT_INIT      7
5092 #define ANEG_STATE_ACK_DETECT           8
5093 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5094 #define ANEG_STATE_COMPLETE_ACK         10
5095 #define ANEG_STATE_IDLE_DETECT_INIT     11
5096 #define ANEG_STATE_IDLE_DETECT          12
5097 #define ANEG_STATE_LINK_OK              13
5098 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5099 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5100
5101         u32 flags;
5102 #define MR_AN_ENABLE            0x00000001
5103 #define MR_RESTART_AN           0x00000002
5104 #define MR_AN_COMPLETE          0x00000004
5105 #define MR_PAGE_RX              0x00000008
5106 #define MR_NP_LOADED            0x00000010
5107 #define MR_TOGGLE_TX            0x00000020
5108 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5109 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5110 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5111 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5112 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5113 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5114 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5115 #define MR_TOGGLE_RX            0x00002000
5116 #define MR_NP_RX                0x00004000
5117
5118 #define MR_LINK_OK              0x80000000
5119
5120         unsigned long link_time, cur_time;
5121
5122         u32 ability_match_cfg;
5123         int ability_match_count;
5124
5125         char ability_match, idle_match, ack_match;
5126
5127         u32 txconfig, rxconfig;
5128 #define ANEG_CFG_NP             0x00000080
5129 #define ANEG_CFG_ACK            0x00000040
5130 #define ANEG_CFG_RF2            0x00000020
5131 #define ANEG_CFG_RF1            0x00000010
5132 #define ANEG_CFG_PS2            0x00000001
5133 #define ANEG_CFG_PS1            0x00008000
5134 #define ANEG_CFG_HD             0x00004000
5135 #define ANEG_CFG_FD             0x00002000
5136 #define ANEG_CFG_INVAL          0x00001f06
5137
5138 };
5139 #define ANEG_OK         0
5140 #define ANEG_DONE       1
5141 #define ANEG_TIMER_ENAB 2
5142 #define ANEG_FAILED     -1
5143
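     /* ap->cur_time ticks once per state-machine invocation, roughly
      * every 1 us when driven from fiber_autoneg(), so this is ~10 ms.
      */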
5144 #define ANEG_STATE_SETTLE_TIME  10000
5145
5146 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5147                                    struct tg3_fiber_aneginfo *ap)
5148 {
5149         u16 flowctrl;
5150         unsigned long delta;
5151         u32 rx_cfg_reg;
5152         int ret;
5153
5154         if (ap->state == ANEG_STATE_UNKNOWN) {
5155                 ap->rxconfig = 0;
5156                 ap->link_time = 0;
5157                 ap->cur_time = 0;
5158                 ap->ability_match_cfg = 0;
5159                 ap->ability_match_count = 0;
5160                 ap->ability_match = 0;
5161                 ap->idle_match = 0;
5162                 ap->ack_match = 0;
5163         }
5164         ap->cur_time++;
5165
5166         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5167                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5168
5169                 if (rx_cfg_reg != ap->ability_match_cfg) {
5170                         ap->ability_match_cfg = rx_cfg_reg;
5171                         ap->ability_match = 0;
5172                         ap->ability_match_count = 0;
5173                 } else {
5174                         if (++ap->ability_match_count > 1) {
5175                                 ap->ability_match = 1;
5176                                 ap->ability_match_cfg = rx_cfg_reg;
5177                         }
5178                 }
5179                 if (rx_cfg_reg & ANEG_CFG_ACK)
5180                         ap->ack_match = 1;
5181                 else
5182                         ap->ack_match = 0;
5183
5184                 ap->idle_match = 0;
5185         } else {
5186                 ap->idle_match = 1;
5187                 ap->ability_match_cfg = 0;
5188                 ap->ability_match_count = 0;
5189                 ap->ability_match = 0;
5190                 ap->ack_match = 0;
5191
5192                 rx_cfg_reg = 0;
5193         }
5194
5195         ap->rxconfig = rx_cfg_reg;
5196         ret = ANEG_OK;
5197
5198         switch (ap->state) {
5199         case ANEG_STATE_UNKNOWN:
5200                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5201                         ap->state = ANEG_STATE_AN_ENABLE;
5202
5203                 /* fallthru */
5204         case ANEG_STATE_AN_ENABLE:
5205                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5206                 if (ap->flags & MR_AN_ENABLE) {
5207                         ap->link_time = 0;
5208                         ap->cur_time = 0;
5209                         ap->ability_match_cfg = 0;
5210                         ap->ability_match_count = 0;
5211                         ap->ability_match = 0;
5212                         ap->idle_match = 0;
5213                         ap->ack_match = 0;
5214
5215                         ap->state = ANEG_STATE_RESTART_INIT;
5216                 } else {
5217                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5218                 }
5219                 break;
5220
5221         case ANEG_STATE_RESTART_INIT:
5222                 ap->link_time = ap->cur_time;
5223                 ap->flags &= ~(MR_NP_LOADED);
5224                 ap->txconfig = 0;
5225                 tw32(MAC_TX_AUTO_NEG, 0);
5226                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5227                 tw32_f(MAC_MODE, tp->mac_mode);
5228                 udelay(40);
5229
5230                 ret = ANEG_TIMER_ENAB;
5231                 ap->state = ANEG_STATE_RESTART;
5232
5233                 /* fallthru */
5234         case ANEG_STATE_RESTART:
5235                 delta = ap->cur_time - ap->link_time;
5236                 if (delta > ANEG_STATE_SETTLE_TIME)
5237                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5238                 else
5239                         ret = ANEG_TIMER_ENAB;
5240                 break;
5241
5242         case ANEG_STATE_DISABLE_LINK_OK:
5243                 ret = ANEG_DONE;
5244                 break;
5245
5246         case ANEG_STATE_ABILITY_DETECT_INIT:
5247                 ap->flags &= ~(MR_TOGGLE_TX);
5248                 ap->txconfig = ANEG_CFG_FD;
5249                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5250                 if (flowctrl & ADVERTISE_1000XPAUSE)
5251                         ap->txconfig |= ANEG_CFG_PS1;
5252                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5253                         ap->txconfig |= ANEG_CFG_PS2;
5254                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5255                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5256                 tw32_f(MAC_MODE, tp->mac_mode);
5257                 udelay(40);
5258
5259                 ap->state = ANEG_STATE_ABILITY_DETECT;
5260                 break;
5261
5262         case ANEG_STATE_ABILITY_DETECT:
5263                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5264                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5265                 break;
5266
5267         case ANEG_STATE_ACK_DETECT_INIT:
5268                 ap->txconfig |= ANEG_CFG_ACK;
5269                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5270                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5271                 tw32_f(MAC_MODE, tp->mac_mode);
5272                 udelay(40);
5273
5274                 ap->state = ANEG_STATE_ACK_DETECT;
5275
5276                 /* fallthru */
5277         case ANEG_STATE_ACK_DETECT:
5278                 if (ap->ack_match != 0) {
5279                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5280                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5281                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5282                         } else {
5283                                 ap->state = ANEG_STATE_AN_ENABLE;
5284                         }
5285                 } else if (ap->ability_match != 0 &&
5286                            ap->rxconfig == 0) {
5287                         ap->state = ANEG_STATE_AN_ENABLE;
5288                 }
5289                 break;
5290
5291         case ANEG_STATE_COMPLETE_ACK_INIT:
5292                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5293                         ret = ANEG_FAILED;
5294                         break;
5295                 }
5296                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5297                                MR_LP_ADV_HALF_DUPLEX |
5298                                MR_LP_ADV_SYM_PAUSE |
5299                                MR_LP_ADV_ASYM_PAUSE |
5300                                MR_LP_ADV_REMOTE_FAULT1 |
5301                                MR_LP_ADV_REMOTE_FAULT2 |
5302                                MR_LP_ADV_NEXT_PAGE |
5303                                MR_TOGGLE_RX |
5304                                MR_NP_RX);
5305                 if (ap->rxconfig & ANEG_CFG_FD)
5306                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5307                 if (ap->rxconfig & ANEG_CFG_HD)
5308                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5309                 if (ap->rxconfig & ANEG_CFG_PS1)
5310                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5311                 if (ap->rxconfig & ANEG_CFG_PS2)
5312                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5313                 if (ap->rxconfig & ANEG_CFG_RF1)
5314                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5315                 if (ap->rxconfig & ANEG_CFG_RF2)
5316                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5317                 if (ap->rxconfig & ANEG_CFG_NP)
5318                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5319
5320                 ap->link_time = ap->cur_time;
5321
5322                 ap->flags ^= (MR_TOGGLE_TX);
5323                 if (ap->rxconfig & 0x0008)
5324                         ap->flags |= MR_TOGGLE_RX;
5325                 if (ap->rxconfig & ANEG_CFG_NP)
5326                         ap->flags |= MR_NP_RX;
5327                 ap->flags |= MR_PAGE_RX;
5328
5329                 ap->state = ANEG_STATE_COMPLETE_ACK;
5330                 ret = ANEG_TIMER_ENAB;
5331                 break;
5332
5333         case ANEG_STATE_COMPLETE_ACK:
5334                 if (ap->ability_match != 0 &&
5335                     ap->rxconfig == 0) {
5336                         ap->state = ANEG_STATE_AN_ENABLE;
5337                         break;
5338                 }
5339                 delta = ap->cur_time - ap->link_time;
5340                 if (delta > ANEG_STATE_SETTLE_TIME) {
5341                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5342                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5343                         } else {
5344                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5345                                     !(ap->flags & MR_NP_RX)) {
5346                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5347                                 } else {
5348                                         ret = ANEG_FAILED;
5349                                 }
5350                         }
5351                 }
5352                 break;
5353
5354         case ANEG_STATE_IDLE_DETECT_INIT:
5355                 ap->link_time = ap->cur_time;
5356                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5357                 tw32_f(MAC_MODE, tp->mac_mode);
5358                 udelay(40);
5359
5360                 ap->state = ANEG_STATE_IDLE_DETECT;
5361                 ret = ANEG_TIMER_ENAB;
5362                 break;
5363
5364         case ANEG_STATE_IDLE_DETECT:
5365                 if (ap->ability_match != 0 &&
5366                     ap->rxconfig == 0) {
5367                         ap->state = ANEG_STATE_AN_ENABLE;
5368                         break;
5369                 }
5370                 delta = ap->cur_time - ap->link_time;
5371                 if (delta > ANEG_STATE_SETTLE_TIME) {
5372                         /* XXX another gem from the Broadcom driver :( */
5373                         ap->state = ANEG_STATE_LINK_OK;
5374                 }
5375                 break;
5376
5377         case ANEG_STATE_LINK_OK:
5378                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5379                 ret = ANEG_DONE;
5380                 break;
5381
5382         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5383                 /* ??? unimplemented */
5384                 break;
5385
5386         case ANEG_STATE_NEXT_PAGE_WAIT:
5387                 /* ??? unimplemented */
5388                 break;
5389
5390         default:
5391                 ret = ANEG_FAILED;
5392                 break;
5393         }
5394
5395         return ret;
5396 }
5397
5398 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5399 {
5400         int res = 0;
5401         struct tg3_fiber_aneginfo aninfo;
5402         int status = ANEG_FAILED;
5403         unsigned int tick;
5404         u32 tmp;
5405
5406         tw32_f(MAC_TX_AUTO_NEG, 0);
5407
5408         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5409         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5410         udelay(40);
5411
5412         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5413         udelay(40);
5414
5415         memset(&aninfo, 0, sizeof(aninfo));
5416         aninfo.flags |= MR_AN_ENABLE;
5417         aninfo.state = ANEG_STATE_UNKNOWN;
5418         aninfo.cur_time = 0;
5419         tick = 0;
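             /* Clock the state machine about once per microsecond, giving
              * autoneg roughly 195 ms to complete.
              */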
5420         while (++tick < 195000) {
5421                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5422                 if (status == ANEG_DONE || status == ANEG_FAILED)
5423                         break;
5424
5425                 udelay(1);
5426         }
5427
5428         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5429         tw32_f(MAC_MODE, tp->mac_mode);
5430         udelay(40);
5431
5432         *txflags = aninfo.txconfig;
5433         *rxflags = aninfo.flags;
5434
5435         if (status == ANEG_DONE &&
5436             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5437                              MR_LP_ADV_FULL_DUPLEX)))
5438                 res = 1;
5439
5440         return res;
5441 }
5442
5443 static void tg3_init_bcm8002(struct tg3 *tp)
5444 {
5445         u32 mac_status = tr32(MAC_STATUS);
5446         int i;
5447
5448         /* Reset when initializing the first time or when we have a link. */
5449         if (tg3_flag(tp, INIT_COMPLETE) &&
5450             !(mac_status & MAC_STATUS_PCS_SYNCED))
5451                 return;
5452
5453         /* Set PLL lock range. */
5454         tg3_writephy(tp, 0x16, 0x8007);
5455
5456         /* SW reset */
5457         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5458
5459         /* Wait for reset to complete. */
5460         /* XXX schedule_timeout() ... */
5461         for (i = 0; i < 500; i++)
5462                 udelay(10);
5463
5464         /* Config mode; select PMA/Ch 1 regs. */
5465         tg3_writephy(tp, 0x10, 0x8411);
5466
5467         /* Enable auto-lock and comdet, select txclk for tx. */
5468         tg3_writephy(tp, 0x11, 0x0a10);
5469
5470         tg3_writephy(tp, 0x18, 0x00a0);
5471         tg3_writephy(tp, 0x16, 0x41ff);
5472
5473         /* Assert and deassert POR. */
5474         tg3_writephy(tp, 0x13, 0x0400);
5475         udelay(40);
5476         tg3_writephy(tp, 0x13, 0x0000);
5477
5478         tg3_writephy(tp, 0x11, 0x0a50);
5479         udelay(40);
5480         tg3_writephy(tp, 0x11, 0x0a10);
5481
5482         /* Wait for signal to stabilize */
5483         /* XXX schedule_timeout() ... */
5484         for (i = 0; i < 15000; i++)
5485                 udelay(10);
5486
5487         /* Deselect the channel register so we can read the PHYID
5488          * later.
5489          */
5490         tg3_writephy(tp, 0x10, 0x8011);
5491 }
5492
5493 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5494 {
5495         u16 flowctrl;
5496         bool current_link_up;
5497         u32 sg_dig_ctrl, sg_dig_status;
5498         u32 serdes_cfg, expected_sg_dig_ctrl;
5499         int workaround, port_a;
5500
5501         serdes_cfg = 0;
5502         expected_sg_dig_ctrl = 0;
5503         workaround = 0;
5504         port_a = 1;
5505         current_link_up = false;
5506
5507         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5508             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5509                 workaround = 1;
5510                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5511                         port_a = 0;
5512
5513                 /* Preserve bits 0-11, 13, 14 for signal pre-emphasis
5514                  * and bits 20-23 for the voltage regulator. */
5515                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5516         }
5517
5518         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5519
5520         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5521                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5522                         if (workaround) {
5523                                 u32 val = serdes_cfg;
5524
5525                                 if (port_a)
5526                                         val |= 0xc010000;
5527                                 else
5528                                         val |= 0x4010000;
5529                                 tw32_f(MAC_SERDES_CFG, val);
5530                         }
5531
5532                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5533                 }
5534                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5535                         tg3_setup_flow_control(tp, 0, 0);
5536                         current_link_up = true;
5537                 }
5538                 goto out;
5539         }
5540
5541         /* Want auto-negotiation.  */
5542         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5543
5544         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5545         if (flowctrl & ADVERTISE_1000XPAUSE)
5546                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5547         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5548                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5549
5550         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5551                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5552                     tp->serdes_counter &&
5553                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5554                                     MAC_STATUS_RCVD_CFG)) ==
5555                      MAC_STATUS_PCS_SYNCED)) {
5556                         tp->serdes_counter--;
5557                         current_link_up = true;
5558                         goto out;
5559                 }
5560 restart_autoneg:
5561                 if (workaround)
5562                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5563                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5564                 udelay(5);
5565                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5566
5567                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5568                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5569         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5570                                  MAC_STATUS_SIGNAL_DET)) {
5571                 sg_dig_status = tr32(SG_DIG_STATUS);
5572                 mac_status = tr32(MAC_STATUS);
5573
5574                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5575                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5576                         u32 local_adv = 0, remote_adv = 0;
5577
5578                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5579                                 local_adv |= ADVERTISE_1000XPAUSE;
5580                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5581                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5582
5583                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5584                                 remote_adv |= LPA_1000XPAUSE;
5585                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5586                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5587
5588                         tp->link_config.rmt_adv =
5589                                            mii_adv_to_ethtool_adv_x(remote_adv);
5590
5591                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5592                         current_link_up = true;
5593                         tp->serdes_counter = 0;
5594                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5595                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5596                         if (tp->serdes_counter)
5597                                 tp->serdes_counter--;
5598                         else {
5599                                 if (workaround) {
5600                                         u32 val = serdes_cfg;
5601
5602                                         if (port_a)
5603                                                 val |= 0xc010000;
5604                                         else
5605                                                 val |= 0x4010000;
5606
5607                                         tw32_f(MAC_SERDES_CFG, val);
5608                                 }
5609
5610                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5611                                 udelay(40);
5612
5613                                 /* Link parallel detection: the link is up
5614                                  * only if we have PCS_SYNC and are not
5615                                  * receiving config code words. */
5616                                 mac_status = tr32(MAC_STATUS);
5617                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5618                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5619                                         tg3_setup_flow_control(tp, 0, 0);
5620                                         current_link_up = true;
5621                                         tp->phy_flags |=
5622                                                 TG3_PHYFLG_PARALLEL_DETECT;
5623                                         tp->serdes_counter =
5624                                                 SERDES_PARALLEL_DET_TIMEOUT;
5625                                 } else
5626                                         goto restart_autoneg;
5627                         }
5628                 }
5629         } else {
5630                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5631                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5632         }
5633
5634 out:
5635         return current_link_up;
5636 }
5637
5638 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5639 {
5640         bool current_link_up = false;
5641
5642         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5643                 goto out;
5644
5645         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5646                 u32 txflags, rxflags;
5647                 int i;
5648
5649                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5650                         u32 local_adv = 0, remote_adv = 0;
5651
5652                         if (txflags & ANEG_CFG_PS1)
5653                                 local_adv |= ADVERTISE_1000XPAUSE;
5654                         if (txflags & ANEG_CFG_PS2)
5655                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5656
5657                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5658                                 remote_adv |= LPA_1000XPAUSE;
5659                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5660                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5661
5662                         tp->link_config.rmt_adv =
5663                                            mii_adv_to_ethtool_adv_x(remote_adv);
5664
5665                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5666
5667                         current_link_up = true;
5668                 }
5669                 for (i = 0; i < 30; i++) {
5670                         udelay(20);
5671                         tw32_f(MAC_STATUS,
5672                                (MAC_STATUS_SYNC_CHANGED |
5673                                 MAC_STATUS_CFG_CHANGED));
5674                         udelay(40);
5675                         if ((tr32(MAC_STATUS) &
5676                              (MAC_STATUS_SYNC_CHANGED |
5677                               MAC_STATUS_CFG_CHANGED)) == 0)
5678                                 break;
5679                 }
5680
5681                 mac_status = tr32(MAC_STATUS);
5682                 if (!current_link_up &&
5683                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5684                     !(mac_status & MAC_STATUS_RCVD_CFG))
5685                         current_link_up = true;
5686         } else {
5687                 tg3_setup_flow_control(tp, 0, 0);
5688
5689                 /* Forcing 1000FD link up. */
5690                 current_link_up = true;
5691
5692                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5693                 udelay(40);
5694
5695                 tw32_f(MAC_MODE, tp->mac_mode);
5696                 udelay(40);
5697         }
5698
5699 out:
5700         return current_link_up;
5701 }
5702
5703 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5704 {
5705         u32 orig_pause_cfg;
5706         u16 orig_active_speed;
5707         u8 orig_active_duplex;
5708         u32 mac_status;
5709         bool current_link_up;
5710         int i;
5711
5712         orig_pause_cfg = tp->link_config.active_flowctrl;
5713         orig_active_speed = tp->link_config.active_speed;
5714         orig_active_duplex = tp->link_config.active_duplex;
5715
5716         if (!tg3_flag(tp, HW_AUTONEG) &&
5717             tp->link_up &&
5718             tg3_flag(tp, INIT_COMPLETE)) {
5719                 mac_status = tr32(MAC_STATUS);
5720                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5721                                MAC_STATUS_SIGNAL_DET |
5722                                MAC_STATUS_CFG_CHANGED |
5723                                MAC_STATUS_RCVD_CFG);
5724                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5725                                    MAC_STATUS_SIGNAL_DET)) {
5726                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5727                                             MAC_STATUS_CFG_CHANGED));
5728                         return 0;
5729                 }
5730         }
5731
5732         tw32_f(MAC_TX_AUTO_NEG, 0);
5733
5734         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5735         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5736         tw32_f(MAC_MODE, tp->mac_mode);
5737         udelay(40);
5738
5739         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5740                 tg3_init_bcm8002(tp);
5741
5742         /* Enable link change events even while serdes polling is in use. */
5743         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5744         udelay(40);
5745
5746         current_link_up = false;
5747         tp->link_config.rmt_adv = 0;
5748         mac_status = tr32(MAC_STATUS);
5749
5750         if (tg3_flag(tp, HW_AUTONEG))
5751                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5752         else
5753                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5754
5755         tp->napi[0].hw_status->status =
5756                 (SD_STATUS_UPDATED |
5757                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5758
5759         for (i = 0; i < 100; i++) {
5760                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5761                                     MAC_STATUS_CFG_CHANGED));
5762                 udelay(5);
5763                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5764                                          MAC_STATUS_CFG_CHANGED |
5765                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5766                         break;
5767         }
5768
5769         mac_status = tr32(MAC_STATUS);
5770         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5771                 current_link_up = false;
5772                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5773                     tp->serdes_counter == 0) {
5774                         tw32_f(MAC_MODE, (tp->mac_mode |
5775                                           MAC_MODE_SEND_CONFIGS));
5776                         udelay(1);
5777                         tw32_f(MAC_MODE, tp->mac_mode);
5778                 }
5779         }
5780
5781         if (current_link_up) {
5782                 tp->link_config.active_speed = SPEED_1000;
5783                 tp->link_config.active_duplex = DUPLEX_FULL;
5784                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5785                                     LED_CTRL_LNKLED_OVERRIDE |
5786                                     LED_CTRL_1000MBPS_ON));
5787         } else {
5788                 tp->link_config.active_speed = SPEED_UNKNOWN;
5789                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5790                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5791                                     LED_CTRL_LNKLED_OVERRIDE |
5792                                     LED_CTRL_TRAFFIC_OVERRIDE));
5793         }
5794
5795         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5796                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5797                 if (orig_pause_cfg != now_pause_cfg ||
5798                     orig_active_speed != tp->link_config.active_speed ||
5799                     orig_active_duplex != tp->link_config.active_duplex)
5800                         tg3_link_report(tp);
5801         }
5802
5803         return 0;
5804 }
5805
5806 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5807 {
5808         int err = 0;
5809         u32 bmsr, bmcr;
5810         u16 current_speed = SPEED_UNKNOWN;
5811         u8 current_duplex = DUPLEX_UNKNOWN;
5812         bool current_link_up = false;
5813         u32 local_adv, remote_adv, sgsr;
5814
5815         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5816              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5817              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5818              (sgsr & SERDES_TG3_SGMII_MODE)) {
5819
5820                 if (force_reset)
5821                         tg3_phy_reset(tp);
5822
5823                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5824
5825                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5826                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5827                 } else {
5828                         current_link_up = true;
5829                         if (sgsr & SERDES_TG3_SPEED_1000) {
5830                                 current_speed = SPEED_1000;
5831                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5832                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5833                                 current_speed = SPEED_100;
5834                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5835                         } else {
5836                                 current_speed = SPEED_10;
5837                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838                         }
5839
5840                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5841                                 current_duplex = DUPLEX_FULL;
5842                         else
5843                                 current_duplex = DUPLEX_HALF;
5844                 }
5845
5846                 tw32_f(MAC_MODE, tp->mac_mode);
5847                 udelay(40);
5848
5849                 tg3_clear_mac_status(tp);
5850
5851                 goto fiber_setup_done;
5852         }
5853
5854         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5855         tw32_f(MAC_MODE, tp->mac_mode);
5856         udelay(40);
5857
5858         tg3_clear_mac_status(tp);
5859
5860         if (force_reset)
5861                 tg3_phy_reset(tp);
5862
5863         tp->link_config.rmt_adv = 0;
5864
5865         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5866         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5867         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5868                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5869                         bmsr |= BMSR_LSTATUS;
5870                 else
5871                         bmsr &= ~BMSR_LSTATUS;
5872         }
5873
5874         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5875
5876         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5877             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5878                 /* do nothing, just check for link up at the end */
5879         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5880                 u32 adv, newadv;
5881
5882                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5883                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5884                                  ADVERTISE_1000XPAUSE |
5885                                  ADVERTISE_1000XPSE_ASYM |
5886                                  ADVERTISE_SLCT);
5887
5888                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5889                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5890
5891                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5892                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5893                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5894                         tg3_writephy(tp, MII_BMCR, bmcr);
5895
5896                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5897                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5898                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5899
5900                         return err;
5901                 }
5902         } else {
5903                 u32 new_bmcr;
5904
5905                 bmcr &= ~BMCR_SPEED1000;
5906                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5907
5908                 if (tp->link_config.duplex == DUPLEX_FULL)
5909                         new_bmcr |= BMCR_FULLDPLX;
5910
5911                 if (new_bmcr != bmcr) {
5912                         /* BMCR_SPEED1000 is a reserved bit that needs
5913                          * to be set on write.
5914                          */
5915                         new_bmcr |= BMCR_SPEED1000;
5916
5917                         /* Force a linkdown */
5918                         if (tp->link_up) {
5919                                 u32 adv;
5920
5921                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5922                                 adv &= ~(ADVERTISE_1000XFULL |
5923                                          ADVERTISE_1000XHALF |
5924                                          ADVERTISE_SLCT);
5925                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5926                                 tg3_writephy(tp, MII_BMCR, bmcr |
5927                                                            BMCR_ANRESTART |
5928                                                            BMCR_ANENABLE);
5929                                 udelay(10);
5930                                 tg3_carrier_off(tp);
5931                         }
5932                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5933                         bmcr = new_bmcr;
5934                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5935                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5936                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5937                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5938                                         bmsr |= BMSR_LSTATUS;
5939                                 else
5940                                         bmsr &= ~BMSR_LSTATUS;
5941                         }
5942                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5943                 }
5944         }
5945
5946         if (bmsr & BMSR_LSTATUS) {
5947                 current_speed = SPEED_1000;
5948                 current_link_up = true;
5949                 if (bmcr & BMCR_FULLDPLX)
5950                         current_duplex = DUPLEX_FULL;
5951                 else
5952                         current_duplex = DUPLEX_HALF;
5953
5954                 local_adv = 0;
5955                 remote_adv = 0;
5956
5957                 if (bmcr & BMCR_ANENABLE) {
5958                         u32 common;
5959
5960                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5961                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5962                         common = local_adv & remote_adv;
5963                         if (common & (ADVERTISE_1000XHALF |
5964                                       ADVERTISE_1000XFULL)) {
5965                                 if (common & ADVERTISE_1000XFULL)
5966                                         current_duplex = DUPLEX_FULL;
5967                                 else
5968                                         current_duplex = DUPLEX_HALF;
5969
5970                                 tp->link_config.rmt_adv =
5971                                            mii_adv_to_ethtool_adv_x(remote_adv);
5972                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5973                                 /* Link is up via parallel detect */
5974                         } else {
5975                                 current_link_up = false;
5976                         }
5977                 }
5978         }
5979
5980 fiber_setup_done:
5981         if (current_link_up && current_duplex == DUPLEX_FULL)
5982                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5983
5984         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5985         if (tp->link_config.active_duplex == DUPLEX_HALF)
5986                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5987
5988         tw32_f(MAC_MODE, tp->mac_mode);
5989         udelay(40);
5990
5991         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5992
5993         tp->link_config.active_speed = current_speed;
5994         tp->link_config.active_duplex = current_duplex;
5995
5996         tg3_test_and_report_link_chg(tp, current_link_up);
5997         return err;
5998 }
5999
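/* Editorial note: called periodically (presumably from the driver's
 * timer path) to detect a fiber link partner that does not
 * autonegotiate.  Once serdes_counter expires, signal detect without
 * received config code words forces a 1000FD link; receipt of code
 * words later re-enables autoneg.
 */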
6000 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6001 {
6002         if (tp->serdes_counter) {
6003                 /* Give autoneg time to complete. */
6004                 tp->serdes_counter--;
6005                 return;
6006         }
6007
6008         if (!tp->link_up &&
6009             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6010                 u32 bmcr;
6011
6012                 tg3_readphy(tp, MII_BMCR, &bmcr);
6013                 if (bmcr & BMCR_ANENABLE) {
6014                         u32 phy1, phy2;
6015
6016                         /* Select shadow register 0x1f */
6017                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6018                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6019
6020                         /* Select expansion interrupt status register */
6021                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6022                                          MII_TG3_DSP_EXP1_INT_STAT);
6023                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6024                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6025
6026                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6027                                 /* We have signal detect and are not
6028                                  * receiving config code words; the link
6029                                  * is up by parallel detection.
6030                                  */
6031
6032                                 bmcr &= ~BMCR_ANENABLE;
6033                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6034                                 tg3_writephy(tp, MII_BMCR, bmcr);
6035                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6036                         }
6037                 }
6038         } else if (tp->link_up &&
6039                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6040                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6041                 u32 phy2;
6042
6043                 /* Select expansion interrupt status register */
6044                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6045                                  MII_TG3_DSP_EXP1_INT_STAT);
6046                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6047                 if (phy2 & 0x20) {
6048                         u32 bmcr;
6049
6050                         /* Config code words received, turn on autoneg. */
6051                         tg3_readphy(tp, MII_BMCR, &bmcr);
6052                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6053
6054                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6055
6056                 }
6057         }
6058 }
6059
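/* Editorial note: dispatches to the fiber, fiber-MII, or copper setup
 * routine based on the PHY flags, then applies link-dependent
 * housekeeping: the 5784_AX clock prescaler, TX slot-time/IPG lengths
 * (a longer slot time for half-duplex gigabit), statistics coalescing
 * ticks, and the ASPM L1 threshold workaround.
 */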
6060 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6061 {
6062         u32 val;
6063         int err;
6064
6065         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6066                 err = tg3_setup_fiber_phy(tp, force_reset);
6067         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6068                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6069         else
6070                 err = tg3_setup_copper_phy(tp, force_reset);
6071
6072         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6073                 u32 scale;
6074
6075                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6076                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6077                         scale = 65;
6078                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6079                         scale = 6;
6080                 else
6081                         scale = 12;
6082
6083                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6084                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6085                 tw32(GRC_MISC_CFG, val);
6086         }
6087
6088         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6089               (6 << TX_LENGTHS_IPG_SHIFT);
6090         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6091             tg3_asic_rev(tp) == ASIC_REV_5762)
6092                 val |= tr32(MAC_TX_LENGTHS) &
6093                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6094                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6095
6096         if (tp->link_config.active_speed == SPEED_1000 &&
6097             tp->link_config.active_duplex == DUPLEX_HALF)
6098                 tw32(MAC_TX_LENGTHS, val |
6099                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6100         else
6101                 tw32(MAC_TX_LENGTHS, val |
6102                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6103
6104         if (!tg3_flag(tp, 5705_PLUS)) {
6105                 if (tp->link_up) {
6106                         tw32(HOSTCC_STAT_COAL_TICKS,
6107                              tp->coal.stats_block_coalesce_usecs);
6108                 } else {
6109                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6110                 }
6111         }
6112
6113         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6114                 val = tr32(PCIE_PWR_MGMT_THRESH);
6115                 if (!tp->link_up)
6116                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6117                               tp->pwrmgmt_thresh;
6118                 else
6119                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6120                 tw32(PCIE_PWR_MGMT_THRESH, val);
6121         }
6122
6123         return err;
6124 }
6125
6126 /* tp->lock must be held */
6127 static u64 tg3_refclk_read(struct tg3 *tp)
6128 {
6129         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6130         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6131 }
6132
6133 /* tp->lock must be held */
6134 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6135 {
6136         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6137
6138         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6139         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6140         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6141         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6142 }
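
/* Editorial note: the sequence above parks the reference clock (STOP),
 * programs the new 64-bit count in two 32-bit halves, and restarts it
 * (RESUME) with a flushed write, so the counter cannot tick while it
 * is being reloaded.
 */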
6143
6144 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6145 static inline void tg3_full_unlock(struct tg3 *tp);
6146 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6147 {
6148         struct tg3 *tp = netdev_priv(dev);
6149
6150         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6151                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6152                                 SOF_TIMESTAMPING_SOFTWARE;
6153
6154         if (tg3_flag(tp, PTP_CAPABLE)) {
6155                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6156                                         SOF_TIMESTAMPING_RX_HARDWARE |
6157                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6158         }
6159
6160         if (tp->ptp_clock)
6161                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6162         else
6163                 info->phc_index = -1;
6164
6165         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6166
6167         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6168                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6169                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6170                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6171         return 0;
6172 }
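
/* Editorial note: this is the ethtool get_ts_info() hook behind
 * "ethtool -T <iface>"; the phc_index reported above is how userspace
 * tools such as ptp4l locate the matching /dev/ptp* device.
 */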
6173
6174 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6175 {
6176         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6177         bool neg_adj = false;
6178         u32 correction = 0;
6179
6180         if (ppb < 0) {
6181                 neg_adj = true;
6182                 ppb = -ppb;
6183         }
6184
6185         /* Frequency adjustment is performed using hardware with a 24 bit
6186          * accumulator and a programmable correction value. On each clk, the
6187          * correction value gets added to the accumulator and when it
6188          * overflows, the time counter is incremented/decremented.
6189          *
6190          * So conversion from ppb to correction value is
6191          *              ppb * (1 << 24) / 1000000000
6192          */
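        /* Editorial worked example (not in the original source): a
         * request of ppb = 1000 gives correction =
         * 1000 * (1 << 24) / 1000000000 = 16 after truncation, so the
         * 24-bit accumulator wraps about once every 2^24 / 16 clocks,
         * realizing roughly 16 / (1 << 24) ~= 954 ppb of adjustment.
         */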
6193         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6194                      TG3_EAV_REF_CLK_CORRECT_MASK;
6195
6196         tg3_full_lock(tp, 0);
6197
6198         if (correction)
6199                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6200                      TG3_EAV_REF_CLK_CORRECT_EN |
6201                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6202         else
6203                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6204
6205         tg3_full_unlock(tp);
6206
6207         return 0;
6208 }
6209
6210 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6211 {
6212         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6213
6214         tg3_full_lock(tp, 0);
6215         tp->ptp_adjust += delta;
6216         tg3_full_unlock(tp);
6217
6218         return 0;
6219 }
6220
6221 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6222 {
6223         u64 ns;
6224         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6225
6226         tg3_full_lock(tp, 0);
6227         ns = tg3_refclk_read(tp);
6228         ns += tp->ptp_adjust;
6229         tg3_full_unlock(tp);
6230
6231         *ts = ns_to_timespec64(ns);
6232
6233         return 0;
6234 }
6235
6236 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6237                            const struct timespec64 *ts)
6238 {
6239         u64 ns;
6240         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6241
6242         ns = timespec64_to_ns(ts);
6243
6244         tg3_full_lock(tp, 0);
6245         tg3_refclk_write(tp, ns);
6246         tp->ptp_adjust = 0;
6247         tg3_full_unlock(tp);
6248
6249         return 0;
6250 }
6251
6252 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6253                           struct ptp_clock_request *rq, int on)
6254 {
6255         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6256         u32 clock_ctl;
6257         int rval = 0;
6258
6259         switch (rq->type) {
6260         case PTP_CLK_REQ_PEROUT:
6261                 if (rq->perout.index != 0)
6262                         return -EINVAL;
6263
6264                 tg3_full_lock(tp, 0);
6265                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6266                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6267
6268                 if (on) {
6269                         u64 nsec;
6270
6271                         nsec = rq->perout.start.sec * 1000000000ULL +
6272                                rq->perout.start.nsec;
6273
6274                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6275                                 netdev_warn(tp->dev,
6276                                             "Device supports only a one-shot timesync output, period must be 0\n");
6277                                 rval = -EINVAL;
6278                                 goto err_out;
6279                         }
6280
6281                         if (nsec & (1ULL << 63)) {
6282                                 netdev_warn(tp->dev,
6283                                             "Start value (nsec) is over the limit; start must fit in 63 bits\n");
6284                                 rval = -EINVAL;
6285                                 goto err_out;
6286                         }
6287
6288                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6289                         tw32(TG3_EAV_WATCHDOG0_MSB,
6290                              TG3_EAV_WATCHDOG0_EN |
6291                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6292
6293                         tw32(TG3_EAV_REF_CLCK_CTL,
6294                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6295                 } else {
6296                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6297                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6298                 }
6299
6300 err_out:
6301                 tg3_full_unlock(tp);
6302                 return rval;
6303
6304         default:
6305                 break;
6306         }
6307
6308         return -EOPNOTSUPP;
6309 }
6310
6311 static const struct ptp_clock_info tg3_ptp_caps = {
6312         .owner          = THIS_MODULE,
6313         .name           = "tg3 clock",
6314         .max_adj        = 250000000,
6315         .n_alarm        = 0,
6316         .n_ext_ts       = 0,
6317         .n_per_out      = 1,
6318         .n_pins         = 0,
6319         .pps            = 0,
6320         .adjfreq        = tg3_ptp_adjfreq,
6321         .adjtime        = tg3_ptp_adjtime,
6322         .gettime64      = tg3_ptp_gettime,
6323         .settime64      = tg3_ptp_settime,
6324         .enable         = tg3_ptp_enable,
6325 };
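
/* Editorial note: tg3_ptp_caps is copied into tp->ptp_info by
 * tg3_ptp_init() below; the clock itself is presumably registered
 * elsewhere in the driver via ptp_clock_register(), which yields the
 * tp->ptp_clock handle torn down in tg3_ptp_fini().
 */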
6326
6327 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6328                                      struct skb_shared_hwtstamps *timestamp)
6329 {
6330         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6331         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6332                                            tp->ptp_adjust);
6333 }
6334
6335 /* tp->lock must be held */
6336 static void tg3_ptp_init(struct tg3 *tp)
6337 {
6338         if (!tg3_flag(tp, PTP_CAPABLE))
6339                 return;
6340
6341         /* Initialize the hardware clock to the system time. */
6342         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6343         tp->ptp_adjust = 0;
6344         tp->ptp_info = tg3_ptp_caps;
6345 }
6346
6347 /* tp->lock must be held */
6348 static void tg3_ptp_resume(struct tg3 *tp)
6349 {
6350         if (!tg3_flag(tp, PTP_CAPABLE))
6351                 return;
6352
6353         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6354         tp->ptp_adjust = 0;
6355 }
6356
6357 static void tg3_ptp_fini(struct tg3 *tp)
6358 {
6359         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6360                 return;
6361
6362         ptp_clock_unregister(tp->ptp_clock);
6363         tp->ptp_clock = NULL;
6364         tp->ptp_adjust = 0;
6365 }
6366
6367 static inline int tg3_irq_sync(struct tg3 *tp)
6368 {
6369         return tp->irq_sync;
6370 }
6371
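/* Editorial note: tg3_rd32_loop() copies 'len' bytes of registers,
 * starting at register offset 'off', into the register dump buffer.
 * The initial pointer bump mirrors each register at the same offset
 * within 'dst', so the dump preserves the register map layout.
 */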
6372 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6373 {
6374         int i;
6375
6376         dst = (u32 *)((u8 *)dst + off);
6377         for (i = 0; i < len; i += sizeof(u32))
6378                 *dst++ = tr32(off + i);
6379 }
6380
6381 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6382 {
6383         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6384         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6385         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6386         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6387         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6388         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6389         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6390         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6391         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6392         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6393         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6394         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6395         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6396         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6397         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6398         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6399         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6400         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6401         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6402
6403         if (tg3_flag(tp, SUPPORT_MSIX))
6404                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6405
6406         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6407         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6408         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6409         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6410         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6411         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6412         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6413         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6414
6415         if (!tg3_flag(tp, 5705_PLUS)) {
6416                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6417                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6418                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6419         }
6420
6421         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6422         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6423         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6424         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6425         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6426
6427         if (tg3_flag(tp, NVRAM))
6428                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6429 }
6430
6431 static void tg3_dump_state(struct tg3 *tp)
6432 {
6433         int i;
6434         u32 *regs;
6435
6436         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6437         if (!regs)
6438                 return;
6439
6440         if (tg3_flag(tp, PCI_EXPRESS)) {
6441                 /* Read up to but not including private PCI registers */
6442                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6443                         regs[i / sizeof(u32)] = tr32(i);
6444         } else
6445                 tg3_dump_legacy_regs(tp, regs);
6446
6447         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6448                 if (!regs[i + 0] && !regs[i + 1] &&
6449                     !regs[i + 2] && !regs[i + 3])
6450                         continue;
6451
6452                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6453                            i * 4,
6454                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6455         }
6456
6457         kfree(regs);
6458
6459         for (i = 0; i < tp->irq_cnt; i++) {
6460                 struct tg3_napi *tnapi = &tp->napi[i];
6461
6462                 /* SW status block */
6463                 netdev_err(tp->dev,
6464                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6465                            i,
6466                            tnapi->hw_status->status,
6467                            tnapi->hw_status->status_tag,
6468                            tnapi->hw_status->rx_jumbo_consumer,
6469                            tnapi->hw_status->rx_consumer,
6470                            tnapi->hw_status->rx_mini_consumer,
6471                            tnapi->hw_status->idx[0].rx_producer,
6472                            tnapi->hw_status->idx[0].tx_consumer);
6473
6474                 netdev_err(tp->dev,
6475                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6476                            i,
6477                            tnapi->last_tag, tnapi->last_irq_tag,
6478                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6479                            tnapi->rx_rcb_ptr,
6480                            tnapi->prodring.rx_std_prod_idx,
6481                            tnapi->prodring.rx_std_cons_idx,
6482                            tnapi->prodring.rx_jmb_prod_idx,
6483                            tnapi->prodring.rx_jmb_cons_idx);
6484         }
6485 }
6486
6487 /* This is called whenever we suspect that the system chipset is re-
6488  * ordering the sequence of MMIO writes to the tx send mailbox. The symptom
6489  * is bogus tx completions. We try to recover by setting the
6490  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6491  * in the workqueue.
6492  */
6493 static void tg3_tx_recover(struct tg3 *tp)
6494 {
6495         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6496                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6497
6498         netdev_warn(tp->dev,
6499                     "The system may be re-ordering memory-mapped I/O "
6500                     "cycles to the network device, attempting to recover. "
6501                     "Please report the problem to the driver maintainer "
6502                     "and include system chipset information.\n");
6503
6504         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6505 }
6506
6507 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6508 {
6509         /* Tell compiler to fetch tx indices from memory. */
6510         barrier();
6511         return tnapi->tx_pending -
6512                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6513 }
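
/* A worked illustration of tg3_tx_avail() (editorial, assuming
 * TG3_TX_RING_SIZE is 512): with tx_prod = 5 and tx_cons = 510 the
 * producer has wrapped, (5 - 510) & 511 = 7 descriptors are still in
 * flight, and tx_pending - 7 slots remain available.
 */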
6514
6515 /* Tigon3 never reports partial packet sends.  So we do not
6516  * need special logic to handle SKBs that have not had all
6517  * of their frags sent yet, like SunGEM does.
6518  */
6519 static void tg3_tx(struct tg3_napi *tnapi)
6520 {
6521         struct tg3 *tp = tnapi->tp;
6522         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6523         u32 sw_idx = tnapi->tx_cons;
6524         struct netdev_queue *txq;
6525         int index = tnapi - tp->napi;
6526         unsigned int pkts_compl = 0, bytes_compl = 0;
6527
6528         if (tg3_flag(tp, ENABLE_TSS))
6529                 index--;
6530
6531         txq = netdev_get_tx_queue(tp->dev, index);
6532
6533         while (sw_idx != hw_idx) {
6534                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6535                 struct sk_buff *skb = ri->skb;
6536                 int i, tx_bug = 0;
6537
6538                 if (unlikely(skb == NULL)) {
6539                         tg3_tx_recover(tp);
6540                         return;
6541                 }
6542
6543                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6544                         struct skb_shared_hwtstamps timestamp;
6545                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6546                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6547
6548                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6549
6550                         skb_tstamp_tx(skb, &timestamp);
6551                 }
6552
6553                 pci_unmap_single(tp->pdev,
6554                                  dma_unmap_addr(ri, mapping),
6555                                  skb_headlen(skb),
6556                                  PCI_DMA_TODEVICE);
6557
6558                 ri->skb = NULL;
6559
6560                 while (ri->fragmented) {
6561                         ri->fragmented = false;
6562                         sw_idx = NEXT_TX(sw_idx);
6563                         ri = &tnapi->tx_buffers[sw_idx];
6564                 }
6565
6566                 sw_idx = NEXT_TX(sw_idx);
6567
6568                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6569                         ri = &tnapi->tx_buffers[sw_idx];
6570                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6571                                 tx_bug = 1;
6572
6573                         pci_unmap_page(tp->pdev,
6574                                        dma_unmap_addr(ri, mapping),
6575                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6576                                        PCI_DMA_TODEVICE);
6577
6578                         while (ri->fragmented) {
6579                                 ri->fragmented = false;
6580                                 sw_idx = NEXT_TX(sw_idx);
6581                                 ri = &tnapi->tx_buffers[sw_idx];
6582                         }
6583
6584                         sw_idx = NEXT_TX(sw_idx);
6585                 }
6586
6587                 pkts_compl++;
6588                 bytes_compl += skb->len;
6589
6590                 dev_consume_skb_any(skb);
6591
6592                 if (unlikely(tx_bug)) {
6593                         tg3_tx_recover(tp);
6594                         return;
6595                 }
6596         }
6597
6598         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6599
6600         tnapi->tx_cons = sw_idx;
6601
6602         /* Need to make the tx_cons update visible to tg3_start_xmit()
6603          * before checking for netif_queue_stopped().  Without the
6604          * memory barrier, there is a small possibility that tg3_start_xmit()
6605          * will miss it and cause the queue to be stopped forever.
6606          */
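        /* Editorial sketch of the interleaving being guarded against:
         *
         *   CPU0 (tg3_tx)                CPU1 (tg3_start_xmit)
         *   stores tx_cons               reads stale tx_cons, no room
         *   reads "queue not stopped"    stops the queue
         *
         * Neither side then wakes the queue.  The smp_mb() orders the
         * tx_cons store before the stopped check, presumably pairing
         * with corresponding ordering in the xmit path.
         */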
6607         smp_mb();
6608
6609         if (unlikely(netif_tx_queue_stopped(txq) &&
6610                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6611                 __netif_tx_lock(txq, smp_processor_id());
6612                 if (netif_tx_queue_stopped(txq) &&
6613                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6614                         netif_tx_wake_queue(txq);
6615                 __netif_tx_unlock(txq);
6616         }
6617 }
6618
6619 static void tg3_frag_free(bool is_frag, void *data)
6620 {
6621         if (is_frag)
6622                 skb_free_frag(data);
6623         else
6624                 kfree(data);
6625 }
6626
6627 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6628 {
6629         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6630                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6631
6632         if (!ri->data)
6633                 return;
6634
6635         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6636                          map_sz, PCI_DMA_FROMDEVICE);
6637         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6638         ri->data = NULL;
6639 }
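
/* Editorial note: the skb_size computation in tg3_rx_data_free()
 * deliberately mirrors the one in tg3_alloc_rx_data() below, so the
 * frag-vs-kmalloc decision made at allocation time is reproduced at
 * free time and tg3_frag_free() takes the matching release path.
 */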
6640
6641
6642 /* Returns size of skb allocated or < 0 on error.
6643  *
6644  * We only need to fill in the address because the other members
6645  * of the RX descriptor are invariant, see tg3_init_rings.
6646  *
6647  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6648  * posting buffers we only dirty the first cache line of the RX
6649  * descriptor (containing the address).  Whereas for the RX status
6650  * buffers the cpu only reads the last cacheline of the RX descriptor
6651  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6652  */
6653 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6654                              u32 opaque_key, u32 dest_idx_unmasked,
6655                              unsigned int *frag_size)
6656 {
6657         struct tg3_rx_buffer_desc *desc;
6658         struct ring_info *map;
6659         u8 *data;
6660         dma_addr_t mapping;
6661         int skb_size, data_size, dest_idx;
6662
6663         switch (opaque_key) {
6664         case RXD_OPAQUE_RING_STD:
6665                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6666                 desc = &tpr->rx_std[dest_idx];
6667                 map = &tpr->rx_std_buffers[dest_idx];
6668                 data_size = tp->rx_pkt_map_sz;
6669                 break;
6670
6671         case RXD_OPAQUE_RING_JUMBO:
6672                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6673                 desc = &tpr->rx_jmb[dest_idx].std;
6674                 map = &tpr->rx_jmb_buffers[dest_idx];
6675                 data_size = TG3_RX_JMB_MAP_SZ;
6676                 break;
6677
6678         default:
6679                 return -EINVAL;
6680         }
6681
6682         /* Do not overwrite any of the map or rp information
6683          * until we are sure we can commit to a new buffer.
6684          *
6685          * Callers depend upon this behavior and assume that
6686          * we leave everything unchanged if we fail.
6687          */
6688         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6689                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6690         if (skb_size <= PAGE_SIZE) {
6691                 data = netdev_alloc_frag(skb_size);
6692                 *frag_size = skb_size;
6693         } else {
6694                 data = kmalloc(skb_size, GFP_ATOMIC);
6695                 *frag_size = 0;
6696         }
6697         if (!data)
6698                 return -ENOMEM;
6699
6700         mapping = pci_map_single(tp->pdev,
6701                                  data + TG3_RX_OFFSET(tp),
6702                                  data_size,
6703                                  PCI_DMA_FROMDEVICE);
6704         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6705                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6706                 return -EIO;
6707         }
6708
6709         map->data = data;
6710         dma_unmap_addr_set(map, mapping, mapping);
6711
6712         desc->addr_hi = ((u64)mapping >> 32);
6713         desc->addr_lo = ((u64)mapping & 0xffffffff);
6714
6715         return data_size;
6716 }
6717
6718 /* We only need to move the address over because the other
6719  * members of the RX descriptor are invariant.  See the notes above
6720  * tg3_alloc_rx_data for full details.
6721  */
6722 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6723                            struct tg3_rx_prodring_set *dpr,
6724                            u32 opaque_key, int src_idx,
6725                            u32 dest_idx_unmasked)
6726 {
6727         struct tg3 *tp = tnapi->tp;
6728         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6729         struct ring_info *src_map, *dest_map;
6730         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6731         int dest_idx;
6732
6733         switch (opaque_key) {
6734         case RXD_OPAQUE_RING_STD:
6735                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6736                 dest_desc = &dpr->rx_std[dest_idx];
6737                 dest_map = &dpr->rx_std_buffers[dest_idx];
6738                 src_desc = &spr->rx_std[src_idx];
6739                 src_map = &spr->rx_std_buffers[src_idx];
6740                 break;
6741
6742         case RXD_OPAQUE_RING_JUMBO:
6743                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6744                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6745                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6746                 src_desc = &spr->rx_jmb[src_idx].std;
6747                 src_map = &spr->rx_jmb_buffers[src_idx];
6748                 break;
6749
6750         default:
6751                 return;
6752         }
6753
6754         dest_map->data = src_map->data;
6755         dma_unmap_addr_set(dest_map, mapping,
6756                            dma_unmap_addr(src_map, mapping));
6757         dest_desc->addr_hi = src_desc->addr_hi;
6758         dest_desc->addr_lo = src_desc->addr_lo;
6759
6760         /* Ensure that the update to the skb happens after the physical
6761          * addresses have been transferred to the new BD location.
6762          */
6763         smp_wmb();
6764
6765         src_map->data = NULL;
6766 }
6767
6768 /* The RX ring scheme is composed of multiple rings which post fresh
6769  * buffers to the chip, and one special ring the chip uses to report
6770  * status back to the host.
6771  *
6772  * The special ring reports the status of received packets to the
6773  * host.  The chip does not write into the original descriptor the
6774  * RX buffer was obtained from.  The chip simply takes the original
6775  * descriptor as provided by the host, updates the status and length
6776  * field, then writes this into the next status ring entry.
6777  *
6778  * Each ring the host uses to post buffers to the chip is described
6779  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6780  * it is first placed into the on-chip RAM.  When the packet's length
6781  * is known, the chip walks down the TG3_BDINFO entries to select the
6782  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
6783  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6784  *
6785  * The "separate ring for rx status" scheme may sound queer, but it makes
6786  * sense from a cache coherency perspective.  If only the host writes
6787  * to the buffer post rings, and only the chip writes to the rx status
6788  * rings, then cache lines never move beyond shared-modified state.
6789  * If both the host and chip were to write into the same ring, cache line
6790  * eviction could occur since both entities want it in an exclusive state.
6791  */
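/* Editorial sketch of the scheme above (not in the original source):
 *
 *   host --posts buffer BDs--> std/jumbo producer rings --DMA--> chip
 *   host <--reads status BDs-- rx return (status) ring  <------ chip
 */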
6792 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6793 {
6794         struct tg3 *tp = tnapi->tp;
6795         u32 work_mask, rx_std_posted = 0;
6796         u32 std_prod_idx, jmb_prod_idx;
6797         u32 sw_idx = tnapi->rx_rcb_ptr;
6798         u16 hw_idx;
6799         int received;
6800         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6801
6802         hw_idx = *(tnapi->rx_rcb_prod_idx);
6803         /*
6804          * We need to order the read of hw_idx and the read of
6805          * the opaque cookie.
6806          */
6807         rmb();
6808         work_mask = 0;
6809         received = 0;
6810         std_prod_idx = tpr->rx_std_prod_idx;
6811         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6812         while (sw_idx != hw_idx && budget > 0) {
6813                 struct ring_info *ri;
6814                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6815                 unsigned int len;
6816                 struct sk_buff *skb;
6817                 dma_addr_t dma_addr;
6818                 u32 opaque_key, desc_idx, *post_ptr;
6819                 u8 *data;
6820                 u64 tstamp = 0;
6821
6822                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6823                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6824                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6825                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6826                         dma_addr = dma_unmap_addr(ri, mapping);
6827                         data = ri->data;
6828                         post_ptr = &std_prod_idx;
6829                         rx_std_posted++;
6830                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6831                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6832                         dma_addr = dma_unmap_addr(ri, mapping);
6833                         data = ri->data;
6834                         post_ptr = &jmb_prod_idx;
6835                 } else
6836                         goto next_pkt_nopost;
6837
6838                 work_mask |= opaque_key;
6839
6840                 if (desc->err_vlan & RXD_ERR_MASK) {
6841                 drop_it:
6842                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6843                                        desc_idx, *post_ptr);
6844                 drop_it_no_recycle:
6845                         /* The card keeps track of the other statistics. */
6846                         tp->rx_dropped++;
6847                         goto next_pkt;
6848                 }
6849
6850                 prefetch(data + TG3_RX_OFFSET(tp));
6851                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6852                       ETH_FCS_LEN;
6853
6854                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6855                      RXD_FLAG_PTPSTAT_PTPV1 ||
6856                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6857                      RXD_FLAG_PTPSTAT_PTPV2) {
6858                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6859                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6860                 }
6861
6862                 if (len > TG3_RX_COPY_THRESH(tp)) {
6863                         int skb_size;
6864                         unsigned int frag_size;
6865
6866                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6867                                                     *post_ptr, &frag_size);
6868                         if (skb_size < 0)
6869                                 goto drop_it;
6870
6871                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6872                                          PCI_DMA_FROMDEVICE);
6873
6874                         /* Ensure that the update to the data happens
6875                          * after the usage of the old DMA mapping.
6876                          */
6877                         smp_wmb();
6878
6879                         ri->data = NULL;
6880
6881                         skb = build_skb(data, frag_size);
6882                         if (!skb) {
6883                                 tg3_frag_free(frag_size != 0, data);
6884                                 goto drop_it_no_recycle;
6885                         }
6886                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6887                 } else {
6888                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6889                                        desc_idx, *post_ptr);
6890
6891                         skb = netdev_alloc_skb(tp->dev,
6892                                                len + TG3_RAW_IP_ALIGN);
6893                         if (skb == NULL)
6894                                 goto drop_it_no_recycle;
6895
6896                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6897                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6898                         memcpy(skb->data,
6899                                data + TG3_RX_OFFSET(tp),
6900                                len);
6901                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6902                 }
6903
6904                 skb_put(skb, len);
6905                 if (tstamp)
6906                         tg3_hwclock_to_timestamp(tp, tstamp,
6907                                                  skb_hwtstamps(skb));
6908
6909                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6910                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6911                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6912                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6913                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6914                 else
6915                         skb_checksum_none_assert(skb);
6916
6917                 skb->protocol = eth_type_trans(skb, tp->dev);
6918
6919                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6920                     skb->protocol != htons(ETH_P_8021Q) &&
6921                     skb->protocol != htons(ETH_P_8021AD)) {
6922                         dev_kfree_skb_any(skb);
6923                         goto drop_it_no_recycle;
6924                 }
6925
6926                 if (desc->type_flags & RXD_FLAG_VLAN &&
6927                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6928                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6929                                                desc->err_vlan & RXD_VLAN_MASK);
6930
6931                 napi_gro_receive(&tnapi->napi, skb);
6932
6933                 received++;
6934                 budget--;
6935
6936 next_pkt:
6937                 (*post_ptr)++;
6938
6939                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6940                         tpr->rx_std_prod_idx = std_prod_idx &
6941                                                tp->rx_std_ring_mask;
6942                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6943                                      tpr->rx_std_prod_idx);
6944                         work_mask &= ~RXD_OPAQUE_RING_STD;
6945                         rx_std_posted = 0;
6946                 }
6947 next_pkt_nopost:
6948                 sw_idx++;
6949                 sw_idx &= tp->rx_ret_ring_mask;
6950
6951                 /* Refresh hw_idx to see if there is new work */
6952                 if (sw_idx == hw_idx) {
6953                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6954                         rmb();
6955                 }
6956         }
6957
6958         /* ACK the status ring. */
6959         tnapi->rx_rcb_ptr = sw_idx;
6960         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6961
6962         /* Refill RX ring(s). */
6963         if (!tg3_flag(tp, ENABLE_RSS)) {
6964                 /* Sync BD data before updating mailbox */
6965                 wmb();
6966
6967                 if (work_mask & RXD_OPAQUE_RING_STD) {
6968                         tpr->rx_std_prod_idx = std_prod_idx &
6969                                                tp->rx_std_ring_mask;
6970                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6971                                      tpr->rx_std_prod_idx);
6972                 }
6973                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6974                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6975                                                tp->rx_jmb_ring_mask;
6976                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6977                                      tpr->rx_jmb_prod_idx);
6978                 }
6979                 mmiowb();
6980         } else if (work_mask) {
6981                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6982                  * updated before the producer indices can be updated.
6983                  */
6984                 smp_wmb();
6985
6986                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6987                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6988
6989                 if (tnapi != &tp->napi[1]) {
6990                         tp->rx_refill = true;
6991                         napi_schedule(&tp->napi[1].napi);
6992                 }
6993         }
6994
6995         return received;
6996 }
6997
6998 static void tg3_poll_link(struct tg3 *tp)
6999 {
7000         /* handle link change and other phy events */
7001         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7002                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7003
7004                 if (sblk->status & SD_STATUS_LINK_CHG) {
7005                         sblk->status = SD_STATUS_UPDATED |
7006                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7007                         spin_lock(&tp->lock);
7008                         if (tg3_flag(tp, USE_PHYLIB)) {
7009                                 tw32_f(MAC_STATUS,
7010                                      (MAC_STATUS_SYNC_CHANGED |
7011                                       MAC_STATUS_CFG_CHANGED |
7012                                       MAC_STATUS_MI_COMPLETION |
7013                                       MAC_STATUS_LNKSTATE_CHANGED));
7014                                 udelay(40);
7015                         } else
7016                                 tg3_setup_phy(tp, false);
7017                         spin_unlock(&tp->lock);
7018                 }
7019         }
7020 }
7021
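/* Editorial note: under RSS each RX vector consumes from its own
 * shadow producer ring.  This helper drains recycled buffers from a
 * source ring set (spr) into the destination set (dpr) of napi[0],
 * whose producer indices are the ones posted to the hardware; it
 * returns -ENOSPC if it runs into a destination slot still in use.
 */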
7022 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7023                                 struct tg3_rx_prodring_set *dpr,
7024                                 struct tg3_rx_prodring_set *spr)
7025 {
7026         u32 si, di, cpycnt, src_prod_idx;
7027         int i, err = 0;
7028
7029         while (1) {
7030                 src_prod_idx = spr->rx_std_prod_idx;
7031
7032                 /* Make sure updates to the rx_std_buffers[] entries and the
7033                  * standard producer index are seen in the correct order.
7034                  */
7035                 smp_rmb();
7036
7037                 if (spr->rx_std_cons_idx == src_prod_idx)
7038                         break;
7039
7040                 if (spr->rx_std_cons_idx < src_prod_idx)
7041                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7042                 else
7043                         cpycnt = tp->rx_std_ring_mask + 1 -
7044                                  spr->rx_std_cons_idx;
7045
7046                 cpycnt = min(cpycnt,
7047                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7048
7049                 si = spr->rx_std_cons_idx;
7050                 di = dpr->rx_std_prod_idx;
7051
7052                 for (i = di; i < di + cpycnt; i++) {
7053                         if (dpr->rx_std_buffers[i].data) {
7054                                 cpycnt = i - di;
7055                                 err = -ENOSPC;
7056                                 break;
7057                         }
7058                 }
7059
7060                 if (!cpycnt)
7061                         break;
7062
7063                 /* Ensure that updates to the rx_std_buffers ring and the
7064                  * shadowed hardware producer ring from tg3_recycle_skb() are
7065                  * ordered correctly WRT the skb check above.
7066                  */
7067                 smp_rmb();
7068
7069                 memcpy(&dpr->rx_std_buffers[di],
7070                        &spr->rx_std_buffers[si],
7071                        cpycnt * sizeof(struct ring_info));
7072
7073                 for (i = 0; i < cpycnt; i++, di++, si++) {
7074                         struct tg3_rx_buffer_desc *sbd, *dbd;
7075                         sbd = &spr->rx_std[si];
7076                         dbd = &dpr->rx_std[di];
7077                         dbd->addr_hi = sbd->addr_hi;
7078                         dbd->addr_lo = sbd->addr_lo;
7079                 }
7080
7081                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7082                                        tp->rx_std_ring_mask;
7083                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7084                                        tp->rx_std_ring_mask;
7085         }
7086
7087         while (1) {
7088                 src_prod_idx = spr->rx_jmb_prod_idx;
7089
7090                 /* Make sure updates to the rx_jmb_buffers[] entries and
7091                  * the jumbo producer index are seen in the correct order.
7092                  */
7093                 smp_rmb();
7094
7095                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7096                         break;
7097
7098                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7099                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7100                 else
7101                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7102                                  spr->rx_jmb_cons_idx;
7103
7104                 cpycnt = min(cpycnt,
7105                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7106
7107                 si = spr->rx_jmb_cons_idx;
7108                 di = dpr->rx_jmb_prod_idx;
7109
7110                 for (i = di; i < di + cpycnt; i++) {
7111                         if (dpr->rx_jmb_buffers[i].data) {
7112                                 cpycnt = i - di;
7113                                 err = -ENOSPC;
7114                                 break;
7115                         }
7116                 }
7117
7118                 if (!cpycnt)
7119                         break;
7120
7121                 /* Ensure that updates to the rx_jmb_buffers ring and the
7122                  * shadowed hardware producer ring from tg3_recycle_skb() are
7123                  * ordered correctly WRT the skb check above.
7124                  */
7125                 smp_rmb();
7126
7127                 memcpy(&dpr->rx_jmb_buffers[di],
7128                        &spr->rx_jmb_buffers[si],
7129                        cpycnt * sizeof(struct ring_info));
7130
7131                 for (i = 0; i < cpycnt; i++, di++, si++) {
7132                         struct tg3_rx_buffer_desc *sbd, *dbd;
7133                         sbd = &spr->rx_jmb[si].std;
7134                         dbd = &dpr->rx_jmb[di].std;
7135                         dbd->addr_hi = sbd->addr_hi;
7136                         dbd->addr_lo = sbd->addr_lo;
7137                 }
7138
7139                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7140                                        tp->rx_jmb_ring_mask;
7141                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7142                                        tp->rx_jmb_ring_mask;
7143         }
7144
7145         return err;
7146 }
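
/* Illustrative example (not part of the driver): with a 512-entry
 * standard ring, tp->rx_std_ring_mask is 511.  If the source consumer
 * index is 510 and cpycnt is 4, the masked update above computes
 * (510 + 4) & 511 == 2, so the index wraps cleanly past the end of
 * the ring without needing a conditional.
 */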
7147
7148 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7149 {
7150         struct tg3 *tp = tnapi->tp;
7151
7152         /* run TX completion thread */
7153         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7154                 tg3_tx(tnapi);
7155                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7156                         return work_done;
7157         }
7158
7159         if (!tnapi->rx_rcb_prod_idx)
7160                 return work_done;
7161
7162         /* run RX thread, within the bounds set by NAPI.
7163          * All RX "locking" is done by ensuring outside
7164          * code synchronizes with tg3->napi.poll()
7165          */
7166         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7167                 work_done += tg3_rx(tnapi, budget - work_done);
7168
7169         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7170                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7171                 int i, err = 0;
7172                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7173                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7174
7175                 tp->rx_refill = false;
7176                 for (i = 1; i <= tp->rxq_cnt; i++)
7177                         err |= tg3_rx_prodring_xfer(tp, dpr,
7178                                                     &tp->napi[i].prodring);
7179
7180                 wmb();
7181
7182                 if (std_prod_idx != dpr->rx_std_prod_idx)
7183                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7184                                      dpr->rx_std_prod_idx);
7185
7186                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7187                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7188                                      dpr->rx_jmb_prod_idx);
7189
7190                 mmiowb();
7191
7192                 if (err)
7193                         tw32_f(HOSTCC_MODE, tp->coal_now);
7194         }
7195
7196         return work_done;
7197 }
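
/* Sketch of the RSS refill path above: only tp->napi[0] owns the
 * hardware producer mailboxes, so the per-vector rings stage their
 * recycled buffers locally and napi[1] drains every vector's prodring
 * into napi[0]'s via tg3_rx_prodring_xfer() before the mailbox writes.
 */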
7198
7199 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7200 {
7201         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7202                 schedule_work(&tp->reset_task);
7203 }
7204
7205 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7206 {
7207         cancel_work_sync(&tp->reset_task);
7208         tg3_flag_clear(tp, RESET_TASK_PENDING);
7209         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7210 }
7211
7212 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7213 {
7214         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7215         struct tg3 *tp = tnapi->tp;
7216         int work_done = 0;
7217         struct tg3_hw_status *sblk = tnapi->hw_status;
7218
7219         while (1) {
7220                 work_done = tg3_poll_work(tnapi, work_done, budget);
7221
7222                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7223                         goto tx_recovery;
7224
7225                 if (unlikely(work_done >= budget))
7226                         break;
7227
7228                 /* tnapi->last_tag is written to the interrupt mailbox
7229                  * below to tell the hw how much work has been processed,
7230                  * so we must read it before checking for more work.
7231                  */
7232                 tnapi->last_tag = sblk->status_tag;
7233                 tnapi->last_irq_tag = tnapi->last_tag;
7234                 rmb();
7235
7236                 /* check for RX/TX work to do */
7237                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7238                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7239
7240                         /* This test is not race-free, but it reduces
7241                          * the number of interrupts by looping again.
7242                          */
7243                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7244                                 continue;
7245
7246                         napi_complete_done(napi, work_done);
7247                         /* Reenable interrupts. */
7248                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7249
7250                         /* This test is synchronized by napi_schedule()
7251                          * and napi_complete() to close the race condition.
7252                          */
7253                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7254                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7255                                                   HOSTCC_MODE_ENABLE |
7256                                                   tnapi->coal_now);
7257                         }
7258                         mmiowb();
7259                         break;
7260                 }
7261         }
7262
7263         return work_done;
7264
7265 tx_recovery:
7266         /* work_done is guaranteed to be less than budget. */
7267         napi_complete(napi);
7268         tg3_reset_task_schedule(tp);
7269         return work_done;
7270 }
7271
7272 static void tg3_process_error(struct tg3 *tp)
7273 {
7274         u32 val;
7275         bool real_error = false;
7276
7277         if (tg3_flag(tp, ERROR_PROCESSED))
7278                 return;
7279
7280         /* Check Flow Attention register */
7281         val = tr32(HOSTCC_FLOW_ATTN);
7282         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7283                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7284                 real_error = true;
7285         }
7286
7287         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7288                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7289                 real_error = true;
7290         }
7291
7292         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7293                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7294                 real_error = true;
7295         }
7296
7297         if (!real_error)
7298                 return;
7299
7300         tg3_dump_state(tp);
7301
7302         tg3_flag_set(tp, ERROR_PROCESSED);
7303         tg3_reset_task_schedule(tp);
7304 }
7305
7306 static int tg3_poll(struct napi_struct *napi, int budget)
7307 {
7308         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7309         struct tg3 *tp = tnapi->tp;
7310         int work_done = 0;
7311         struct tg3_hw_status *sblk = tnapi->hw_status;
7312
7313         while (1) {
7314                 if (sblk->status & SD_STATUS_ERROR)
7315                         tg3_process_error(tp);
7316
7317                 tg3_poll_link(tp);
7318
7319                 work_done = tg3_poll_work(tnapi, work_done, budget);
7320
7321                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7322                         goto tx_recovery;
7323
7324                 if (unlikely(work_done >= budget))
7325                         break;
7326
7327                 if (tg3_flag(tp, TAGGED_STATUS)) {
7328                         /* tp->last_tag is used in tg3_int_reenable() below
7329                          * to tell the hw how much work has been processed,
7330                          * so we must read it before checking for more work.
7331                          */
7332                         tnapi->last_tag = sblk->status_tag;
7333                         tnapi->last_irq_tag = tnapi->last_tag;
7334                         rmb();
7335                 } else
7336                         sblk->status &= ~SD_STATUS_UPDATED;
7337
7338                 if (likely(!tg3_has_work(tnapi))) {
7339                         napi_complete_done(napi, work_done);
7340                         tg3_int_reenable(tnapi);
7341                         break;
7342                 }
7343         }
7344
7345         return work_done;
7346
7347 tx_recovery:
7348         /* work_done is guaranteed to be less than budget. */
7349         napi_complete(napi);
7350         tg3_reset_task_schedule(tp);
7351         return work_done;
7352 }
7353
7354 static void tg3_napi_disable(struct tg3 *tp)
7355 {
7356         int i;
7357
7358         for (i = tp->irq_cnt - 1; i >= 0; i--)
7359                 napi_disable(&tp->napi[i].napi);
7360 }
7361
7362 static void tg3_napi_enable(struct tg3 *tp)
7363 {
7364         int i;
7365
7366         for (i = 0; i < tp->irq_cnt; i++)
7367                 napi_enable(&tp->napi[i].napi);
7368 }
7369
7370 static void tg3_napi_init(struct tg3 *tp)
7371 {
7372         int i;
7373
7374         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7375         for (i = 1; i < tp->irq_cnt; i++)
7376                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7377 }
7378
7379 static void tg3_napi_fini(struct tg3 *tp)
7380 {
7381         int i;
7382
7383         for (i = 0; i < tp->irq_cnt; i++)
7384                 netif_napi_del(&tp->napi[i].napi);
7385 }
7386
7387 static inline void tg3_netif_stop(struct tg3 *tp)
7388 {
7389         netif_trans_update(tp->dev);    /* prevent tx timeout */
7390         tg3_napi_disable(tp);
7391         netif_carrier_off(tp->dev);
7392         netif_tx_disable(tp->dev);
7393 }
7394
7395 /* tp->lock must be held */
7396 static inline void tg3_netif_start(struct tg3 *tp)
7397 {
7398         tg3_ptp_resume(tp);
7399
7400         /* NOTE: unconditional netif_tx_wake_all_queues is only
7401          * appropriate so long as all callers are assured to
7402          * have free tx slots (such as after tg3_init_hw)
7403          */
7404         netif_tx_wake_all_queues(tp->dev);
7405
7406         if (tp->link_up)
7407                 netif_carrier_on(tp->dev);
7408
7409         tg3_napi_enable(tp);
7410         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7411         tg3_enable_ints(tp);
7412 }
7413
7414 static void tg3_irq_quiesce(struct tg3 *tp)
7415         __releases(tp->lock)
7416         __acquires(tp->lock)
7417 {
7418         int i;
7419
7420         BUG_ON(tp->irq_sync);
7421
7422         tp->irq_sync = 1;
7423         smp_mb();
7424
7425         spin_unlock_bh(&tp->lock);
7426
7427         for (i = 0; i < tp->irq_cnt; i++)
7428                 synchronize_irq(tp->napi[i].irq_vec);
7429
7430         spin_lock_bh(&tp->lock);
7431 }
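
/* Sketch of the quiesce handshake (tg3_irq_sync() is assumed to test
 * tp->irq_sync): setting irq_sync, dropping the lock, and then calling
 * synchronize_irq() on every vector guarantees that any handler still
 * running has finished and that later ones bail out before touching
 * the hardware, until irq_sync is cleared again.
 */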
7432
7433 /* Fully shut down all tg3 driver activity elsewhere in the system.
7434  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
7435  * as well.  Most of the time, this is not necessary except when
7436  * shutting down the device.
7437  */
7438 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7439 {
7440         spin_lock_bh(&tp->lock);
7441         if (irq_sync)
7442                 tg3_irq_quiesce(tp);
7443 }
7444
7445 static inline void tg3_full_unlock(struct tg3 *tp)
7446 {
7447         spin_unlock_bh(&tp->lock);
7448 }
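
/* Typical usage (illustrative sketch only):
 *
 *      tg3_full_lock(tp, 1);           - non-zero: also quiesce ISRs
 *      ... reprogram the chip ...
 *      tg3_full_unlock(tp);
 *
 * Pass irq_sync == 0 for lightweight reconfiguration where the IRQ
 * handlers may keep running; pass non-zero around resets and shutdown.
 */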
7449
7450 /* One-shot MSI handler - the chip automatically disables the interrupt
7451  * after sending the MSI, so the driver doesn't have to do it.
7452  */
7453 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7454 {
7455         struct tg3_napi *tnapi = dev_id;
7456         struct tg3 *tp = tnapi->tp;
7457
7458         prefetch(tnapi->hw_status);
7459         if (tnapi->rx_rcb)
7460                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7461
7462         if (likely(!tg3_irq_sync(tp)))
7463                 napi_schedule(&tnapi->napi);
7464
7465         return IRQ_HANDLED;
7466 }
7467
7468 /* MSI ISR - No need to check for interrupt sharing and no need to
7469  * flush status block and interrupt mailbox. PCI ordering rules
7470  * guarantee that MSI will arrive after the status block.
7471  */
7472 static irqreturn_t tg3_msi(int irq, void *dev_id)
7473 {
7474         struct tg3_napi *tnapi = dev_id;
7475         struct tg3 *tp = tnapi->tp;
7476
7477         prefetch(tnapi->hw_status);
7478         if (tnapi->rx_rcb)
7479                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7480         /*
7481          * Writing any value to intr-mbox-0 clears PCI INTA# and
7482          * chip-internal interrupt pending events.
7483          * Writing non-zero to intr-mbox-0 additionally tells the
7484          * NIC to stop sending us irqs, engaging "in-intr-handler"
7485          * event coalescing.
7486          */
7487         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7488         if (likely(!tg3_irq_sync(tp)))
7489                 napi_schedule(&tnapi->napi);
7490
7491         return IRQ_RETVAL(1);
7492 }
7493
7494 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7495 {
7496         struct tg3_napi *tnapi = dev_id;
7497         struct tg3 *tp = tnapi->tp;
7498         struct tg3_hw_status *sblk = tnapi->hw_status;
7499         unsigned int handled = 1;
7500
7501         /* In INTx mode, it is possible for the interrupt to arrive at
7502          * the CPU before the status block write posted prior to the
7503          * interrupt has landed.  Reading the PCI State register will
7504          * confirm that the interrupt is ours and will flush the status block.
7505          */
7506         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7507                 if (tg3_flag(tp, CHIP_RESETTING) ||
7508                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7509                         handled = 0;
7510                         goto out;
7511                 }
7512         }
7513
7514         /*
7515          * Writing any value to intr-mbox-0 clears PCI INTA# and
7516          * chip-internal interrupt pending events.
7517          * Writing non-zero to intr-mbox-0 additionally tells the
7518          * NIC to stop sending us irqs, engaging "in-intr-handler"
7519          * event coalescing.
7520          *
7521          * Flush the mailbox to de-assert the IRQ immediately to prevent
7522          * spurious interrupts.  The flush impacts performance but
7523          * excessive spurious interrupts can be worse in some cases.
7524          */
7525         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7526         if (tg3_irq_sync(tp))
7527                 goto out;
7528         sblk->status &= ~SD_STATUS_UPDATED;
7529         if (likely(tg3_has_work(tnapi))) {
7530                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7531                 napi_schedule(&tnapi->napi);
7532         } else {
7533                 /* No work, shared interrupt perhaps?  Re-enable
7534                  * interrupts, and flush that PCI write
7535                  */
7536                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7537                                0x00000000);
7538         }
7539 out:
7540         return IRQ_RETVAL(handled);
7541 }
7542
7543 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7544 {
7545         struct tg3_napi *tnapi = dev_id;
7546         struct tg3 *tp = tnapi->tp;
7547         struct tg3_hw_status *sblk = tnapi->hw_status;
7548         unsigned int handled = 1;
7549
7550         /* In INTx mode, it is possible for the interrupt to arrive at
7551          * the CPU before the status block write posted prior to the
7552          * interrupt has landed.  Reading the PCI State register will
7553          * confirm that the interrupt is ours and will flush the status block.
7554          */
7555         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7556                 if (tg3_flag(tp, CHIP_RESETTING) ||
7557                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7558                         handled = 0;
7559                         goto out;
7560                 }
7561         }
7562
7563         /*
7564          * Writing any value to intr-mbox-0 clears PCI INTA# and
7565          * chip-internal interrupt pending events.
7566          * Writing non-zero to intr-mbox-0 additionally tells the
7567          * NIC to stop sending us irqs, engaging "in-intr-handler"
7568          * event coalescing.
7569          *
7570          * Flush the mailbox to de-assert the IRQ immediately to prevent
7571          * spurious interrupts.  The flush impacts performance but
7572          * excessive spurious interrupts can be worse in some cases.
7573          */
7574         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7575
7576         /*
7577          * In a shared interrupt configuration, sometimes other devices'
7578          * interrupts will scream.  We record the current status tag here
7579          * so that the above check can report that the screaming interrupts
7580          * are unhandled.  Eventually they will be silenced.
7581          */
7582         tnapi->last_irq_tag = sblk->status_tag;
7583
7584         if (tg3_irq_sync(tp))
7585                 goto out;
7586
7587         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7588
7589         napi_schedule(&tnapi->napi);
7590
7591 out:
7592         return IRQ_RETVAL(handled);
7593 }
7594
7595 /* ISR for interrupt test */
7596 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7597 {
7598         struct tg3_napi *tnapi = dev_id;
7599         struct tg3 *tp = tnapi->tp;
7600         struct tg3_hw_status *sblk = tnapi->hw_status;
7601
7602         if ((sblk->status & SD_STATUS_UPDATED) ||
7603             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7604                 tg3_disable_ints(tp);
7605                 return IRQ_RETVAL(1);
7606         }
7607         return IRQ_RETVAL(0);
7608 }
7609
7610 #ifdef CONFIG_NET_POLL_CONTROLLER
7611 static void tg3_poll_controller(struct net_device *dev)
7612 {
7613         int i;
7614         struct tg3 *tp = netdev_priv(dev);
7615
7616         if (tg3_irq_sync(tp))
7617                 return;
7618
7619         for (i = 0; i < tp->irq_cnt; i++)
7620                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7621 }
7622 #endif
7623
7624 static void tg3_tx_timeout(struct net_device *dev)
7625 {
7626         struct tg3 *tp = netdev_priv(dev);
7627
7628         if (netif_msg_tx_err(tp)) {
7629                 netdev_err(dev, "transmit timed out, resetting\n");
7630                 tg3_dump_state(tp);
7631         }
7632
7633         tg3_reset_task_schedule(tp);
7634 }
7635
7636 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7637 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7638 {
7639         u32 base = (u32) mapping & 0xffffffff;
7640
7641         return base + len + 8 < base;
7642 }
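
/* Worked example (illustrative): base = 0xfffffff8, len = 16 gives
 * base + len + 8 = 0x100000010, which truncates to 0x10 in 32 bits.
 * 0x10 < base, so the test flags any buffer that crosses, or ends
 * within 8 bytes of, a 4GB boundary.
 */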
7643
7644 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7645  * of any 4GB boundaries: 4G, 8G, etc
7646  */
7647 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7648                                            u32 len, u32 mss)
7649 {
7650         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7651                 u32 base = (u32) mapping & 0xffffffff;
7652
7653                 return ((base + len + (mss & 0x3fff)) < base);
7654         }
7655         return 0;
7656 }
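
/* Illustrative numbers for the 5762 check above: base = 0xffffff00,
 * len = 0x80, mss = 0x200 gives base + len + (mss & 0x3fff) =
 * 0x100000180, which wraps below base in 32 bits, so TSO buffers
 * ending within MSS bytes of a 4GB boundary are caught as well.
 */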
7657
7658 /* Test for DMA addresses > 40-bit */
7659 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7660                                           int len)
7661 {
7662 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7663         if (tg3_flag(tp, 40BIT_DMA_BUG))
7664                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7665         return 0;
7666 #else
7667         return 0;
7668 #endif
7669 }
7670
7671 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7672                                  dma_addr_t mapping, u32 len, u32 flags,
7673                                  u32 mss, u32 vlan)
7674 {
7675         txbd->addr_hi = ((u64) mapping >> 32);
7676         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7677         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7678         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7679 }
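
/* Example (illustrative): a DMA address of 0x123456789a is split into
 * addr_hi = 0x12 and addr_lo = 0x3456789a, the two 32-bit words the
 * hardware descriptor expects.
 */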
7680
7681 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7682                             dma_addr_t map, u32 len, u32 flags,
7683                             u32 mss, u32 vlan)
7684 {
7685         struct tg3 *tp = tnapi->tp;
7686         bool hwbug = false;
7687
7688         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7689                 hwbug = true;
7690
7691         if (tg3_4g_overflow_test(map, len))
7692                 hwbug = true;
7693
7694         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7695                 hwbug = true;
7696
7697         if (tg3_40bit_overflow_test(tp, map, len))
7698                 hwbug = true;
7699
7700         if (tp->dma_limit) {
7701                 u32 prvidx = *entry;
7702                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7703                 while (len > tp->dma_limit && *budget) {
7704                         u32 frag_len = tp->dma_limit;
7705                         len -= tp->dma_limit;
7706
7707                         /* Avoid the 8-byte DMA problem */
7708                         if (len <= 8) {
7709                                 len += tp->dma_limit / 2;
7710                                 frag_len = tp->dma_limit / 2;
7711                         }
7712
7713                         tnapi->tx_buffers[*entry].fragmented = true;
7714
7715                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7716                                       frag_len, tmp_flag, mss, vlan);
7717                         *budget -= 1;
7718                         prvidx = *entry;
7719                         *entry = NEXT_TX(*entry);
7720
7721                         map += frag_len;
7722                 }
7723
7724                 if (len) {
7725                         if (*budget) {
7726                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7727                                               len, flags, mss, vlan);
7728                                 *budget -= 1;
7729                                 *entry = NEXT_TX(*entry);
7730                         } else {
7731                                 hwbug = true;
7732                                 tnapi->tx_buffers[prvidx].fragmented = false;
7733                         }
7734                 }
7735         } else {
7736                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7737                               len, flags, mss, vlan);
7738                 *entry = NEXT_TX(*entry);
7739         }
7740
7741         return hwbug;
7742 }
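
/* Worked example of the dma_limit split above (illustrative numbers):
 * with dma_limit = 4096 and len = 4100, the first pass would leave a
 * 4-byte tail, so the loop instead borrows from the current chunk and
 * emits a 2048-byte BD, leaving 2052 bytes for the final BD.  Both
 * pieces stay safely above the 8-byte SHORT_DMA_BUG threshold.
 */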
7743
7744 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7745 {
7746         int i;
7747         struct sk_buff *skb;
7748         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7749
7750         skb = txb->skb;
7751         txb->skb = NULL;
7752
7753         pci_unmap_single(tnapi->tp->pdev,
7754                          dma_unmap_addr(txb, mapping),
7755                          skb_headlen(skb),
7756                          PCI_DMA_TODEVICE);
7757
7758         while (txb->fragmented) {
7759                 txb->fragmented = false;
7760                 entry = NEXT_TX(entry);
7761                 txb = &tnapi->tx_buffers[entry];
7762         }
7763
7764         for (i = 0; i <= last; i++) {
7765                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7766
7767                 entry = NEXT_TX(entry);
7768                 txb = &tnapi->tx_buffers[entry];
7769
7770                 pci_unmap_page(tnapi->tp->pdev,
7771                                dma_unmap_addr(txb, mapping),
7772                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7773
7774                 while (txb->fragmented) {
7775                         txb->fragmented = false;
7776                         entry = NEXT_TX(entry);
7777                         txb = &tnapi->tx_buffers[entry];
7778                 }
7779         }
7780 }
7781
7782 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7783 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7784                                        struct sk_buff **pskb,
7785                                        u32 *entry, u32 *budget,
7786                                        u32 base_flags, u32 mss, u32 vlan)
7787 {
7788         struct tg3 *tp = tnapi->tp;
7789         struct sk_buff *new_skb, *skb = *pskb;
7790         dma_addr_t new_addr = 0;
7791         int ret = 0;
7792
7793         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7794                 new_skb = skb_copy(skb, GFP_ATOMIC);
7795         else {
7796                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7797
7798                 new_skb = skb_copy_expand(skb,
7799                                           skb_headroom(skb) + more_headroom,
7800                                           skb_tailroom(skb), GFP_ATOMIC);
7801         }
7802
7803         if (!new_skb) {
7804                 ret = -1;
7805         } else {
7806                 /* New SKB is guaranteed to be linear. */
7807                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7808                                           PCI_DMA_TODEVICE);
7809                 /* Make sure the mapping succeeded */
7810                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7811                         dev_kfree_skb_any(new_skb);
7812                         ret = -1;
7813                 } else {
7814                         u32 save_entry = *entry;
7815
7816                         base_flags |= TXD_FLAG_END;
7817
7818                         tnapi->tx_buffers[*entry].skb = new_skb;
7819                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7820                                            mapping, new_addr);
7821
7822                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7823                                             new_skb->len, base_flags,
7824                                             mss, vlan)) {
7825                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7826                                 dev_kfree_skb_any(new_skb);
7827                                 ret = -1;
7828                         }
7829                 }
7830         }
7831
7832         dev_consume_skb_any(skb);
7833         *pskb = new_skb;
7834         return ret;
7835 }
7836
7837 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7838 {
7839         /* Check if we will never have enough descriptors,
7840          * as gso_segs can exceed the current ring size.
7841          */
7842         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7843 }
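
/* Rough arithmetic (assuming the default tx_pending of 511): the test
 * above rejects GSO packets with gso_segs >= 170, because tg3_tso_bug()
 * below estimates up to three descriptors per segment and such packets
 * could never fit in the ring even when it is completely empty.
 */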
7844
7845 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7846
7847 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7848  * indicated in tg3_tx_frag_set()
7849  */
7850 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7851                        struct netdev_queue *txq, struct sk_buff *skb)
7852 {
7853         struct sk_buff *segs, *nskb;
7854         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7855
7856         /* Estimate the number of fragments in the worst case */
7857         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7858                 netif_tx_stop_queue(txq);
7859
7860                 /* netif_tx_stop_queue() must be done before checking
7861                  * the tx index in tg3_tx_avail() below, because in
7862                  * tg3_tx(), we update tx index before checking for
7863                  * netif_tx_queue_stopped().
7864                  */
7865                 smp_mb();
7866                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7867                         return NETDEV_TX_BUSY;
7868
7869                 netif_tx_wake_queue(txq);
7870         }
7871
7872         segs = skb_gso_segment(skb, tp->dev->features &
7873                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7874         if (IS_ERR(segs) || !segs)
7875                 goto tg3_tso_bug_end;
7876
7877         do {
7878                 nskb = segs;
7879                 segs = segs->next;
7880                 nskb->next = NULL;
7881                 tg3_start_xmit(nskb, tp->dev);
7882         } while (segs);
7883
7884 tg3_tso_bug_end:
7885         dev_consume_skb_any(skb);
7886
7887         return NETDEV_TX_OK;
7888 }
7889
7890 /* hard_start_xmit for all devices */
7891 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7892 {
7893         struct tg3 *tp = netdev_priv(dev);
7894         u32 len, entry, base_flags, mss, vlan = 0;
7895         u32 budget;
7896         int i = -1, would_hit_hwbug;
7897         dma_addr_t mapping;
7898         struct tg3_napi *tnapi;
7899         struct netdev_queue *txq;
7900         unsigned int last;
7901         struct iphdr *iph = NULL;
7902         struct tcphdr *tcph = NULL;
7903         __sum16 tcp_csum = 0, ip_csum = 0;
7904         __be16 ip_tot_len = 0;
7905
7906         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7907         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7908         if (tg3_flag(tp, ENABLE_TSS))
7909                 tnapi++;
7910
7911         budget = tg3_tx_avail(tnapi);
7912
7913         /* We are running in BH disabled context with netif_tx_lock
7914          * and TX reclaim runs via tp->napi.poll inside of a software
7915          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7916          * no IRQ context deadlocks to worry about either.  Rejoice!
7917          */
7918         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7919                 if (!netif_tx_queue_stopped(txq)) {
7920                         netif_tx_stop_queue(txq);
7921
7922                         /* This is a hard error, log it. */
7923                         netdev_err(dev,
7924                                    "BUG! Tx Ring full when queue awake!\n");
7925                 }
7926                 return NETDEV_TX_BUSY;
7927         }
7928
7929         entry = tnapi->tx_prod;
7930         base_flags = 0;
7931
7932         mss = skb_shinfo(skb)->gso_size;
7933         if (mss) {
7934                 u32 tcp_opt_len, hdr_len;
7935
7936                 if (skb_cow_head(skb, 0))
7937                         goto drop;
7938
7939                 iph = ip_hdr(skb);
7940                 tcp_opt_len = tcp_optlen(skb);
7941
7942                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7943
7944                 /* HW/FW cannot correctly segment packets that have been
7945                  * vlan encapsulated.
7946                  */
7947                 if (skb->protocol == htons(ETH_P_8021Q) ||
7948                     skb->protocol == htons(ETH_P_8021AD)) {
7949                         if (tg3_tso_bug_gso_check(tnapi, skb))
7950                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7951                         goto drop;
7952                 }
7953
7954                 if (!skb_is_gso_v6(skb)) {
7955                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7956                             tg3_flag(tp, TSO_BUG)) {
7957                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7958                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7959                                 goto drop;
7960                         }
7961                         ip_csum = iph->check;
7962                         ip_tot_len = iph->tot_len;
7963                         iph->check = 0;
7964                         iph->tot_len = htons(mss + hdr_len);
7965                 }
7966
7967                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7968                                TXD_FLAG_CPU_POST_DMA);
7969
7970                 tcph = tcp_hdr(skb);
7971                 tcp_csum = tcph->check;
7972
7973                 if (tg3_flag(tp, HW_TSO_1) ||
7974                     tg3_flag(tp, HW_TSO_2) ||
7975                     tg3_flag(tp, HW_TSO_3)) {
7976                         tcph->check = 0;
7977                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7978                 } else {
7979                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7980                                                          0, IPPROTO_TCP, 0);
7981                 }
7982
7983                 if (tg3_flag(tp, HW_TSO_3)) {
7984                         mss |= (hdr_len & 0xc) << 12;
7985                         if (hdr_len & 0x10)
7986                                 base_flags |= 0x00000010;
7987                         base_flags |= (hdr_len & 0x3e0) << 5;
7988                 } else if (tg3_flag(tp, HW_TSO_2))
7989                         mss |= hdr_len << 9;
7990                 else if (tg3_flag(tp, HW_TSO_1) ||
7991                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7992                         if (tcp_opt_len || iph->ihl > 5) {
7993                                 int tsflags;
7994
7995                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7996                                 mss |= (tsflags << 11);
7997                         }
7998                 } else {
7999                         if (tcp_opt_len || iph->ihl > 5) {
8000                                 int tsflags;
8001
8002                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8003                                 base_flags |= tsflags << 12;
8004                         }
8005                 }
8006         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8007                 /* HW/FW cannot correctly checksum packets that have been
8008                  * vlan encapsulated.
8009                  */
8010                 if (skb->protocol == htons(ETH_P_8021Q) ||
8011                     skb->protocol == htons(ETH_P_8021AD)) {
8012                         if (skb_checksum_help(skb))
8013                                 goto drop;
8014                 } else {
8015                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8016                 }
8017         }
8018
8019         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8020             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8021                 base_flags |= TXD_FLAG_JMB_PKT;
8022
8023         if (skb_vlan_tag_present(skb)) {
8024                 base_flags |= TXD_FLAG_VLAN;
8025                 vlan = skb_vlan_tag_get(skb);
8026         }
8027
8028         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8029             tg3_flag(tp, TX_TSTAMP_EN)) {
8030                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8031                 base_flags |= TXD_FLAG_HWTSTAMP;
8032         }
8033
8034         len = skb_headlen(skb);
8035
8036         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8037         if (pci_dma_mapping_error(tp->pdev, mapping))
8038                 goto drop;
8039
8041         tnapi->tx_buffers[entry].skb = skb;
8042         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8043
8044         would_hit_hwbug = 0;
8045
8046         if (tg3_flag(tp, 5701_DMA_BUG))
8047                 would_hit_hwbug = 1;
8048
8049         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8050                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8051                             mss, vlan)) {
8052                 would_hit_hwbug = 1;
8053         } else if (skb_shinfo(skb)->nr_frags > 0) {
8054                 u32 tmp_mss = mss;
8055
8056                 if (!tg3_flag(tp, HW_TSO_1) &&
8057                     !tg3_flag(tp, HW_TSO_2) &&
8058                     !tg3_flag(tp, HW_TSO_3))
8059                         tmp_mss = 0;
8060
8061                 /* Now loop through additional data
8062                  * fragments, and queue them.
8063                  */
8064                 last = skb_shinfo(skb)->nr_frags - 1;
8065                 for (i = 0; i <= last; i++) {
8066                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8067
8068                         len = skb_frag_size(frag);
8069                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8070                                                    len, DMA_TO_DEVICE);
8071
8072                         tnapi->tx_buffers[entry].skb = NULL;
8073                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8074                                            mapping);
8075                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8076                                 goto dma_error;
8077
8078                         if (!budget ||
8079                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8080                                             len, base_flags |
8081                                             ((i == last) ? TXD_FLAG_END : 0),
8082                                             tmp_mss, vlan)) {
8083                                 would_hit_hwbug = 1;
8084                                 break;
8085                         }
8086                 }
8087         }
8088
8089         if (would_hit_hwbug) {
8090                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8091
8092                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8093                         /* If it's a TSO packet, do GSO instead of
8094                          * allocating and copying to a large linear SKB
8095                          */
8096                         if (ip_tot_len) {
8097                                 iph->check = ip_csum;
8098                                 iph->tot_len = ip_tot_len;
8099                         }
8100                         tcph->check = tcp_csum;
8101                         return tg3_tso_bug(tp, tnapi, txq, skb);
8102                 }
8103
8104                 /* If the workaround fails due to memory/mapping
8105                  * failure, silently drop this packet.
8106                  */
8107                 entry = tnapi->tx_prod;
8108                 budget = tg3_tx_avail(tnapi);
8109                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8110                                                 base_flags, mss, vlan))
8111                         goto drop_nofree;
8112         }
8113
8114         skb_tx_timestamp(skb);
8115         netdev_tx_sent_queue(txq, skb->len);
8116
8117         /* Sync BD data before updating mailbox */
8118         wmb();
8119
8120         tnapi->tx_prod = entry;
8121         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8122                 netif_tx_stop_queue(txq);
8123
8124                 /* netif_tx_stop_queue() must be done before checking
8125                  * the tx index in tg3_tx_avail() below, because in
8126                  * tg3_tx(), we update tx index before checking for
8127                  * netif_tx_queue_stopped().
8128                  */
8129                 smp_mb();
8130                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8131                         netif_tx_wake_queue(txq);
8132         }
8133
8134         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8135                 /* Packets are ready, update Tx producer idx on card. */
8136                 tw32_tx_mbox(tnapi->prodmbox, entry);
8137                 mmiowb();
8138         }
8139
8140         return NETDEV_TX_OK;
8141
8142 dma_error:
8143         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8144         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8145 drop:
8146         dev_kfree_skb_any(skb);
8147 drop_nofree:
8148         tp->tx_dropped++;
8149         return NETDEV_TX_OK;
8150 }
8151
8152 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8153 {
8154         if (enable) {
8155                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8156                                   MAC_MODE_PORT_MODE_MASK);
8157
8158                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8159
8160                 if (!tg3_flag(tp, 5705_PLUS))
8161                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8162
8163                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8164                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8165                 else
8166                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8167         } else {
8168                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8169
8170                 if (tg3_flag(tp, 5705_PLUS) ||
8171                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8172                     tg3_asic_rev(tp) == ASIC_REV_5700)
8173                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8174         }
8175
8176         tw32(MAC_MODE, tp->mac_mode);
8177         udelay(40);
8178 }
8179
8180 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8181 {
8182         u32 val, bmcr, mac_mode, ptest = 0;
8183
8184         tg3_phy_toggle_apd(tp, false);
8185         tg3_phy_toggle_automdix(tp, false);
8186
8187         if (extlpbk && tg3_phy_set_extloopbk(tp))
8188                 return -EIO;
8189
8190         bmcr = BMCR_FULLDPLX;
8191         switch (speed) {
8192         case SPEED_10:
8193                 break;
8194         case SPEED_100:
8195                 bmcr |= BMCR_SPEED100;
8196                 break;
8197         case SPEED_1000:
8198         default:
8199                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8200                         speed = SPEED_100;
8201                         bmcr |= BMCR_SPEED100;
8202                 } else {
8203                         speed = SPEED_1000;
8204                         bmcr |= BMCR_SPEED1000;
8205                 }
8206         }
8207
8208         if (extlpbk) {
8209                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8210                         tg3_readphy(tp, MII_CTRL1000, &val);
8211                         val |= CTL1000_AS_MASTER |
8212                                CTL1000_ENABLE_MASTER;
8213                         tg3_writephy(tp, MII_CTRL1000, val);
8214                 } else {
8215                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8216                                 MII_TG3_FET_PTEST_TRIM_2;
8217                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8218                 }
8219         } else
8220                 bmcr |= BMCR_LOOPBACK;
8221
8222         tg3_writephy(tp, MII_BMCR, bmcr);
8223
8224         /* The write needs to be flushed for the FETs */
8225         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8226                 tg3_readphy(tp, MII_BMCR, &bmcr);
8227
8228         udelay(40);
8229
8230         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8231             tg3_asic_rev(tp) == ASIC_REV_5785) {
8232                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8233                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8234                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8235
8236                 /* The write needs to be flushed for the AC131 */
8237                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8238         }
8239
8240         /* Reset to prevent losing 1st rx packet intermittently */
8241         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8242             tg3_flag(tp, 5780_CLASS)) {
8243                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8244                 udelay(10);
8245                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8246         }
8247
8248         mac_mode = tp->mac_mode &
8249                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8250         if (speed == SPEED_1000)
8251                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8252         else
8253                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8254
8255         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8256                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8257
8258                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8259                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8260                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8261                         mac_mode |= MAC_MODE_LINK_POLARITY;
8262
8263                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8264                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8265         }
8266
8267         tw32(MAC_MODE, mac_mode);
8268         udelay(40);
8269
8270         return 0;
8271 }
8272
8273 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8274 {
8275         struct tg3 *tp = netdev_priv(dev);
8276
8277         if (features & NETIF_F_LOOPBACK) {
8278                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8279                         return;
8280
8281                 spin_lock_bh(&tp->lock);
8282                 tg3_mac_loopback(tp, true);
8283                 netif_carrier_on(tp->dev);
8284                 spin_unlock_bh(&tp->lock);
8285                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8286         } else {
8287                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8288                         return;
8289
8290                 spin_lock_bh(&tp->lock);
8291                 tg3_mac_loopback(tp, false);
8292                 /* Force link status check */
8293                 tg3_setup_phy(tp, true);
8294                 spin_unlock_bh(&tp->lock);
8295                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8296         }
8297 }
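
/* Usage note (illustrative): NETIF_F_LOOPBACK is normally toggled from
 * userspace, e.g. "ethtool -K <dev> loopback on", which reaches this
 * helper through tg3_set_features() below.
 */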
8298
8299 static netdev_features_t tg3_fix_features(struct net_device *dev,
8300         netdev_features_t features)
8301 {
8302         struct tg3 *tp = netdev_priv(dev);
8303
8304         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8305                 features &= ~NETIF_F_ALL_TSO;
8306
8307         return features;
8308 }
8309
8310 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8311 {
8312         netdev_features_t changed = dev->features ^ features;
8313
8314         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8315                 tg3_set_loopback(dev, features);
8316
8317         return 0;
8318 }
8319
8320 static void tg3_rx_prodring_free(struct tg3 *tp,
8321                                  struct tg3_rx_prodring_set *tpr)
8322 {
8323         int i;
8324
8325         if (tpr != &tp->napi[0].prodring) {
8326                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8327                      i = (i + 1) & tp->rx_std_ring_mask)
8328                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8329                                         tp->rx_pkt_map_sz);
8330
8331                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8332                         for (i = tpr->rx_jmb_cons_idx;
8333                              i != tpr->rx_jmb_prod_idx;
8334                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8335                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8336                                                 TG3_RX_JMB_MAP_SZ);
8337                         }
8338                 }
8339
8340                 return;
8341         }
8342
8343         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8344                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8345                                 tp->rx_pkt_map_sz);
8346
8347         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8348                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8349                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8350                                         TG3_RX_JMB_MAP_SZ);
8351         }
8352 }
8353
8354 /* Initialize rx rings for packet processing.
8355  *
8356  * The chip has been shut down and the driver detached from
8357  * the networking stack, so no interrupts or new tx packets will
8358  * end up in the driver.  tp->{tx,}lock are held and thus
8359  * we may not sleep.
8360  */
8361 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8362                                  struct tg3_rx_prodring_set *tpr)
8363 {
8364         u32 i, rx_pkt_dma_sz;
8365
8366         tpr->rx_std_cons_idx = 0;
8367         tpr->rx_std_prod_idx = 0;
8368         tpr->rx_jmb_cons_idx = 0;
8369         tpr->rx_jmb_prod_idx = 0;
8370
8371         if (tpr != &tp->napi[0].prodring) {
8372                 memset(&tpr->rx_std_buffers[0], 0,
8373                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8374                 if (tpr->rx_jmb_buffers)
8375                         memset(&tpr->rx_jmb_buffers[0], 0,
8376                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8377                 goto done;
8378         }
8379
8380         /* Zero out all descriptors. */
8381         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8382
8383         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8384         if (tg3_flag(tp, 5780_CLASS) &&
8385             tp->dev->mtu > ETH_DATA_LEN)
8386                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8387         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8388
8389         /* Initialize invariants of the rings; we only set this
8390          * stuff once.  This works because the card does not
8391          * write into the rx buffer posting rings.
8392          */
8393         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8394                 struct tg3_rx_buffer_desc *rxd;
8395
8396                 rxd = &tpr->rx_std[i];
8397                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8398                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8399                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8400                                (i << RXD_OPAQUE_INDEX_SHIFT));
8401         }
8402
8403         /* Now allocate fresh rx data buffers for each rx ring. */
8404         for (i = 0; i < tp->rx_pending; i++) {
8405                 unsigned int frag_size;
8406
8407                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8408                                       &frag_size) < 0) {
8409                         netdev_warn(tp->dev,
8410                                     "Using a smaller RX standard ring. Only "
8411                                     "%d out of %d buffers were allocated "
8412                                     "successfully\n", i, tp->rx_pending);
8413                         if (i == 0)
8414                                 goto initfail;
8415                         tp->rx_pending = i;
8416                         break;
8417                 }
8418         }
8419
8420         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8421                 goto done;
8422
8423         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8424
8425         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8426                 goto done;
8427
8428         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8429                 struct tg3_rx_buffer_desc *rxd;
8430
8431                 rxd = &tpr->rx_jmb[i].std;
8432                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8433                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8434                                   RXD_FLAG_JUMBO;
8435                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8436                        (i << RXD_OPAQUE_INDEX_SHIFT));
8437         }
8438
8439         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8440                 unsigned int frag_size;
8441
8442                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8443                                       &frag_size) < 0) {
8444                         netdev_warn(tp->dev,
8445                                     "Using a smaller RX jumbo ring. Only %d "
8446                                     "out of %d buffers were allocated "
8447                                     "successfully\n", i, tp->rx_jumbo_pending);
8448                         if (i == 0)
8449                                 goto initfail;
8450                         tp->rx_jumbo_pending = i;
8451                         break;
8452                 }
8453         }
8454
8455 done:
8456         return 0;
8457
8458 initfail:
8459         tg3_rx_prodring_free(tp, tpr);
8460         return -ENOMEM;
8461 }
8462
8463 static void tg3_rx_prodring_fini(struct tg3 *tp,
8464                                  struct tg3_rx_prodring_set *tpr)
8465 {
8466         kfree(tpr->rx_std_buffers);
8467         tpr->rx_std_buffers = NULL;
8468         kfree(tpr->rx_jmb_buffers);
8469         tpr->rx_jmb_buffers = NULL;
8470         if (tpr->rx_std) {
8471                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8472                                   tpr->rx_std, tpr->rx_std_mapping);
8473                 tpr->rx_std = NULL;
8474         }
8475         if (tpr->rx_jmb) {
8476                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8477                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8478                 tpr->rx_jmb = NULL;
8479         }
8480 }
8481
8482 static int tg3_rx_prodring_init(struct tg3 *tp,
8483                                 struct tg3_rx_prodring_set *tpr)
8484 {
8485         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8486                                       GFP_KERNEL);
8487         if (!tpr->rx_std_buffers)
8488                 return -ENOMEM;
8489
8490         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8491                                          TG3_RX_STD_RING_BYTES(tp),
8492                                          &tpr->rx_std_mapping,
8493                                          GFP_KERNEL);
8494         if (!tpr->rx_std)
8495                 goto err_out;
8496
8497         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8498                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8499                                               GFP_KERNEL);
8500                 if (!tpr->rx_jmb_buffers)
8501                         goto err_out;
8502
8503                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8504                                                  TG3_RX_JMB_RING_BYTES(tp),
8505                                                  &tpr->rx_jmb_mapping,
8506                                                  GFP_KERNEL);
8507                 if (!tpr->rx_jmb)
8508                         goto err_out;
8509         }
8510
8511         return 0;
8512
8513 err_out:
8514         tg3_rx_prodring_fini(tp, tpr);
8515         return -ENOMEM;
8516 }
8517
8518 /* Free up pending packets in all rx/tx rings.
8519  *
8520  * The chip has been shut down and the driver detached from
8521  * the networking core, so no interrupts or new tx packets will
8522  * end up in the driver.  tp->{tx,}lock is not held and we are not
8523  * in an interrupt context and thus may sleep.
8524  */
8525 static void tg3_free_rings(struct tg3 *tp)
8526 {
8527         int i, j;
8528
8529         for (j = 0; j < tp->irq_cnt; j++) {
8530                 struct tg3_napi *tnapi = &tp->napi[j];
8531
8532                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8533
8534                 if (!tnapi->tx_buffers)
8535                         continue;
8536
8537                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8538                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8539
8540                         if (!skb)
8541                                 continue;
8542
8543                         tg3_tx_skb_unmap(tnapi, i,
8544                                          skb_shinfo(skb)->nr_frags - 1);
8545
8546                         dev_consume_skb_any(skb);
8547                 }
8548                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8549         }
8550 }
8551
8552 /* Initialize tx/rx rings for packet processing.
8553  *
8554  * The chip has been shut down and the driver detached from
8555  * the networking core, so no interrupts or new tx packets will
8556  * end up in the driver.  tp->{tx,}lock are held and thus
8557  * we may not sleep.
8558  */
8559 static int tg3_init_rings(struct tg3 *tp)
8560 {
8561         int i;
8562
8563         /* Free up all the SKBs. */
8564         tg3_free_rings(tp);
8565
8566         for (i = 0; i < tp->irq_cnt; i++) {
8567                 struct tg3_napi *tnapi = &tp->napi[i];
8568
8569                 tnapi->last_tag = 0;
8570                 tnapi->last_irq_tag = 0;
8571                 tnapi->hw_status->status = 0;
8572                 tnapi->hw_status->status_tag = 0;
8573                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8574
8575                 tnapi->tx_prod = 0;
8576                 tnapi->tx_cons = 0;
8577                 if (tnapi->tx_ring)
8578                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8579
8580                 tnapi->rx_rcb_ptr = 0;
8581                 if (tnapi->rx_rcb)
8582                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8583
8584                 if (tnapi->prodring.rx_std &&
8585                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8586                         tg3_free_rings(tp);
8587                         return -ENOMEM;
8588                 }
8589         }
8590
8591         return 0;
8592 }
8593
8594 static void tg3_mem_tx_release(struct tg3 *tp)
8595 {
8596         int i;
8597
8598         for (i = 0; i < tp->irq_max; i++) {
8599                 struct tg3_napi *tnapi = &tp->napi[i];
8600
8601                 if (tnapi->tx_ring) {
8602                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8603                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8604                         tnapi->tx_ring = NULL;
8605                 }
8606
8607                 kfree(tnapi->tx_buffers);
8608                 tnapi->tx_buffers = NULL;
8609         }
8610 }
8611
8612 static int tg3_mem_tx_acquire(struct tg3 *tp)
8613 {
8614         int i;
8615         struct tg3_napi *tnapi = &tp->napi[0];
8616
8617         /* If multivector TSS is enabled, vector 0 does not handle
8618          * tx interrupts.  Don't allocate any resources for it.
8619          */
8620         if (tg3_flag(tp, ENABLE_TSS))
8621                 tnapi++;
8622
8623         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8624                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8625                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8626                 if (!tnapi->tx_buffers)
8627                         goto err_out;
8628
8629                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8630                                                     TG3_TX_RING_BYTES,
8631                                                     &tnapi->tx_desc_mapping,
8632                                                     GFP_KERNEL);
8633                 if (!tnapi->tx_ring)
8634                         goto err_out;
8635         }
8636
8637         return 0;
8638
8639 err_out:
8640         tg3_mem_tx_release(tp);
8641         return -ENOMEM;
8642 }
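
/* Editor's note: the open-coded size multiplication in the kzalloc() call
 * above is the historical style; kcalloc() expresses the same allocation
 * with an overflow-checked multiply.  The equivalent call would be:
 */
#if 0	/* illustrative sketch only -- not part of tg3.c */
	tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
				    sizeof(struct tg3_tx_ring_info),
				    GFP_KERNEL);
#endif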
8643
8644 static void tg3_mem_rx_release(struct tg3 *tp)
8645 {
8646         int i;
8647
8648         for (i = 0; i < tp->irq_max; i++) {
8649                 struct tg3_napi *tnapi = &tp->napi[i];
8650
8651                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8652
8653                 if (!tnapi->rx_rcb)
8654                         continue;
8655
8656                 dma_free_coherent(&tp->pdev->dev,
8657                                   TG3_RX_RCB_RING_BYTES(tp),
8658                                   tnapi->rx_rcb,
8659                                   tnapi->rx_rcb_mapping);
8660                 tnapi->rx_rcb = NULL;
8661         }
8662 }
8663
8664 static int tg3_mem_rx_acquire(struct tg3 *tp)
8665 {
8666         unsigned int i, limit;
8667
8668         limit = tp->rxq_cnt;
8669
8670         /* If RSS is enabled, we need a (dummy) producer ring
8671          * set on vector zero.  This is the true hw prodring.
8672          */
8673         if (tg3_flag(tp, ENABLE_RSS))
8674                 limit++;
8675
8676         for (i = 0; i < limit; i++) {
8677                 struct tg3_napi *tnapi = &tp->napi[i];
8678
8679                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8680                         goto err_out;
8681
8682                 /* If multivector RSS is enabled, vector 0
8683                  * does not handle rx or tx interrupts.
8684                  * Don't allocate any resources for it.
8685                  */
8686                 if (!i && tg3_flag(tp, ENABLE_RSS))
8687                         continue;
8688
8689                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8690                                                     TG3_RX_RCB_RING_BYTES(tp),
8691                                                     &tnapi->rx_rcb_mapping,
8692                                                     GFP_KERNEL);
8693                 if (!tnapi->rx_rcb)
8694                         goto err_out;
8695         }
8696
8697         return 0;
8698
8699 err_out:
8700         tg3_mem_rx_release(tp);
8701         return -ENOMEM;
8702 }
8703
8704 /*
8705  * Must not be invoked with interrupt sources disabled and
8706  * the hardware shut down.
8707  */
8708 static void tg3_free_consistent(struct tg3 *tp)
8709 {
8710         int i;
8711
8712         for (i = 0; i < tp->irq_cnt; i++) {
8713                 struct tg3_napi *tnapi = &tp->napi[i];
8714
8715                 if (tnapi->hw_status) {
8716                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8717                                           tnapi->hw_status,
8718                                           tnapi->status_mapping);
8719                         tnapi->hw_status = NULL;
8720                 }
8721         }
8722
8723         tg3_mem_rx_release(tp);
8724         tg3_mem_tx_release(tp);
8725
8726         /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
8727         tg3_full_lock(tp, 0);
8728         if (tp->hw_stats) {
8729                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8730                                   tp->hw_stats, tp->stats_mapping);
8731                 tp->hw_stats = NULL;
8732         }
8733         tg3_full_unlock(tp);
8734 }
8735
8736 /*
8737  * Must not be invoked with interrupt sources disabled and
8738  * the hardware shut down.  Can sleep.
8739  */
8740 static int tg3_alloc_consistent(struct tg3 *tp)
8741 {
8742         int i;
8743
8744         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8745                                            sizeof(struct tg3_hw_stats),
8746                                            &tp->stats_mapping, GFP_KERNEL);
8747         if (!tp->hw_stats)
8748                 goto err_out;
8749
8750         for (i = 0; i < tp->irq_cnt; i++) {
8751                 struct tg3_napi *tnapi = &tp->napi[i];
8752                 struct tg3_hw_status *sblk;
8753
8754                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8755                                                        TG3_HW_STATUS_SIZE,
8756                                                        &tnapi->status_mapping,
8757                                                        GFP_KERNEL);
8758                 if (!tnapi->hw_status)
8759                         goto err_out;
8760
8761                 sblk = tnapi->hw_status;
8762
8763                 if (tg3_flag(tp, ENABLE_RSS)) {
8764                         u16 *prodptr = NULL;
8765
8766                         /*
8767                          * When RSS is enabled, the status block format changes
8768                          * slightly.  The "rx_jumbo_consumer", "reserved",
8769                          * and "rx_mini_consumer" members get mapped to the
8770                          * other three rx return ring producer indexes.
8771                          */
8772                         switch (i) {
8773                         case 1:
8774                                 prodptr = &sblk->idx[0].rx_producer;
8775                                 break;
8776                         case 2:
8777                                 prodptr = &sblk->rx_jumbo_consumer;
8778                                 break;
8779                         case 3:
8780                                 prodptr = &sblk->reserved;
8781                                 break;
8782                         case 4:
8783                                 prodptr = &sblk->rx_mini_consumer;
8784                                 break;
8785                         }
8786                         tnapi->rx_rcb_prod_idx = prodptr;
8787                 } else {
8788                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8789                 }
8790         }
8791
8792         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8793                 goto err_out;
8794
8795         return 0;
8796
8797 err_out:
8798         tg3_free_consistent(tp);
8799         return -ENOMEM;
8800 }
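
/* Editor's note: under RSS the four rx return ring producer indices are
 * spread across status block fields that were originally defined for other
 * purposes (the jumbo/mini consumers and a reserved word).  The switch
 * statement above is equivalent to indexing a small lookup table; a
 * hypothetical reformulation, assuming the tg3_hw_status layout from
 * tg3.h:
 */
#if 0	/* illustrative sketch only -- not part of tg3.c */
static u16 *tg3_rss_prodptr(struct tg3_hw_status *sblk, int vec)
{
	u16 *map[5] = {
		[1] = &sblk->idx[0].rx_producer,
		[2] = &sblk->rx_jumbo_consumer,
		[3] = &sblk->reserved,
		[4] = &sblk->rx_mini_consumer,
	};

	return (vec >= 1 && vec <= 4) ? map[vec] : NULL;
}
#endif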
8801
8802 #define MAX_WAIT_CNT 1000
8803
8804 /* To stop a block, clear the enable bit and poll till it
8805  * clears.  tp->lock is held.
8806  */
8807 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8808 {
8809         unsigned int i;
8810         u32 val;
8811
8812         if (tg3_flag(tp, 5705_PLUS)) {
8813                 switch (ofs) {
8814                 case RCVLSC_MODE:
8815                 case DMAC_MODE:
8816                 case MBFREE_MODE:
8817                 case BUFMGR_MODE:
8818                 case MEMARB_MODE:
8819                         /* We can't enable/disable these bits of the
8820                          * 5705/5750, just say success.
8821                          */
8822                         return 0;
8823
8824                 default:
8825                         break;
8826                 }
8827         }
8828
8829         val = tr32(ofs);
8830         val &= ~enable_bit;
8831         tw32_f(ofs, val);
8832
8833         for (i = 0; i < MAX_WAIT_CNT; i++) {
8834                 if (pci_channel_offline(tp->pdev)) {
8835                         dev_err(&tp->pdev->dev,
8836                                 "tg3_stop_block device offline, "
8837                                 "ofs=%lx enable_bit=%x\n",
8838                                 ofs, enable_bit);
8839                         return -ENODEV;
8840                 }
8841
8842                 udelay(100);
8843                 val = tr32(ofs);
8844                 if ((val & enable_bit) == 0)
8845                         break;
8846         }
8847
8848         if (i == MAX_WAIT_CNT && !silent) {
8849                 dev_err(&tp->pdev->dev,
8850                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8851                         ofs, enable_bit);
8852                 return -ENODEV;
8853         }
8854
8855         return 0;
8856 }
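
/* Editor's note: tg3_stop_block() is the usual "clear the enable bit, then
 * poll until the hardware acknowledges" pattern: MAX_WAIT_CNT iterations
 * of a 100us delay give the block roughly 100ms to stop.  Stripped of the
 * tg3 specifics (reg_read()/reg_write() are hypothetical accessors), the
 * shape of the loop is:
 */
#if 0	/* illustrative sketch only -- not part of tg3.c */
static int stop_block(unsigned long ofs, u32 enable_bit)
{
	unsigned int i;

	reg_write(ofs, reg_read(ofs) & ~enable_bit);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(reg_read(ofs) & enable_bit))
			return 0;	/* block has stopped */
	}

	return -ENODEV;			/* timed out */
}
#endif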
8857
8858 /* tp->lock is held. */
8859 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8860 {
8861         int i, err;
8862
8863         tg3_disable_ints(tp);
8864
8865         if (pci_channel_offline(tp->pdev)) {
8866                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8867                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8868                 err = -ENODEV;
8869                 goto err_no_dev;
8870         }
8871
8872         tp->rx_mode &= ~RX_MODE_ENABLE;
8873         tw32_f(MAC_RX_MODE, tp->rx_mode);
8874         udelay(10);
8875
8876         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8877         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8878         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8879         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8880         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8881         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8882
8883         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8884         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8885         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8886         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8887         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8888         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8889         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8890
8891         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8892         tw32_f(MAC_MODE, tp->mac_mode);
8893         udelay(40);
8894
8895         tp->tx_mode &= ~TX_MODE_ENABLE;
8896         tw32_f(MAC_TX_MODE, tp->tx_mode);
8897
8898         for (i = 0; i < MAX_WAIT_CNT; i++) {
8899                 udelay(100);
8900                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8901                         break;
8902         }
8903         if (i >= MAX_WAIT_CNT) {
8904                 dev_err(&tp->pdev->dev,
8905                         "%s timed out, TX_MODE_ENABLE will not clear "
8906                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8907                 err |= -ENODEV;
8908         }
8909
8910         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8911         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8912         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8913
8914         tw32(FTQ_RESET, 0xffffffff);
8915         tw32(FTQ_RESET, 0x00000000);
8916
8917         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8918         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8919
8920 err_no_dev:
8921         for (i = 0; i < tp->irq_cnt; i++) {
8922                 struct tg3_napi *tnapi = &tp->napi[i];
8923                 if (tnapi->hw_status)
8924                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8925         }
8926
8927         return err;
8928 }
8929
8930 /* Save PCI command register before chip reset */
8931 static void tg3_save_pci_state(struct tg3 *tp)
8932 {
8933         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8934 }
8935
8936 /* Restore PCI state after chip reset */
8937 static void tg3_restore_pci_state(struct tg3 *tp)
8938 {
8939         u32 val;
8940
8941         /* Re-enable indirect register accesses. */
8942         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8943                                tp->misc_host_ctrl);
8944
8945         /* Set MAX PCI retry to zero. */
8946         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8947         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8948             tg3_flag(tp, PCIX_MODE))
8949                 val |= PCISTATE_RETRY_SAME_DMA;
8950         /* Allow reads and writes to the APE register and memory space. */
8951         if (tg3_flag(tp, ENABLE_APE))
8952                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8953                        PCISTATE_ALLOW_APE_SHMEM_WR |
8954                        PCISTATE_ALLOW_APE_PSPACE_WR;
8955         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8956
8957         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8958
8959         if (!tg3_flag(tp, PCI_EXPRESS)) {
8960                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8961                                       tp->pci_cacheline_sz);
8962                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8963                                       tp->pci_lat_timer);
8964         }
8965
8966         /* Make sure PCI-X relaxed ordering bit is clear. */
8967         if (tg3_flag(tp, PCIX_MODE)) {
8968                 u16 pcix_cmd;
8969
8970                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8971                                      &pcix_cmd);
8972                 pcix_cmd &= ~PCI_X_CMD_ERO;
8973                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8974                                       pcix_cmd);
8975         }
8976
8977         if (tg3_flag(tp, 5780_CLASS)) {
8978
8979                 /* Chip reset on 5780 will reset MSI enable bit,
8980                  * so need to restore it.
8981                  */
8982                 if (tg3_flag(tp, USING_MSI)) {
8983                         u16 ctrl;
8984
8985                         pci_read_config_word(tp->pdev,
8986                                              tp->msi_cap + PCI_MSI_FLAGS,
8987                                              &ctrl);
8988                         pci_write_config_word(tp->pdev,
8989                                               tp->msi_cap + PCI_MSI_FLAGS,
8990                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8991                         val = tr32(MSGINT_MODE);
8992                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8993                 }
8994         }
8995 }
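
/* Editor's note: only PCI_COMMAND is captured by tg3_save_pci_state();
 * everything else restored above is either cached elsewhere by the driver
 * (cacheline size, latency timer, misc_host_ctrl) or recomputed from
 * feature flags.  The save/restore bracket around the core-clock reset
 * boils down to:
 */
#if 0	/* illustrative sketch only -- not part of tg3.c */
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);	/* before reset */
	/* ... GRC_MISC_CFG core-clock reset clears memory enable ... */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);	/* after reset */
#endif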
8996
8997 static void tg3_override_clk(struct tg3 *tp)
8998 {
8999         u32 val;
9000
9001         switch (tg3_asic_rev(tp)) {
9002         case ASIC_REV_5717:
9003                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9004                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9005                      TG3_CPMU_MAC_ORIDE_ENABLE);
9006                 break;
9007
9008         case ASIC_REV_5719:
9009         case ASIC_REV_5720:
9010                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9011                 break;
9012
9013         default:
9014                 return;
9015         }
9016 }
9017
9018 static void tg3_restore_clk(struct tg3 *tp)
9019 {
9020         u32 val;
9021
9022         switch (tg3_asic_rev(tp)) {
9023         case ASIC_REV_5717:
9024                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9025                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9026                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9027                 break;
9028
9029         case ASIC_REV_5719:
9030         case ASIC_REV_5720:
9031                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9032                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9033                 break;
9034
9035         default:
9036                 return;
9037         }
9038 }
9039
9040 /* tp->lock is held. */
9041 static int tg3_chip_reset(struct tg3 *tp)
9042         __releases(tp->lock)
9043         __acquires(tp->lock)
9044 {
9045         u32 val;
9046         void (*write_op)(struct tg3 *, u32, u32);
9047         int i, err;
9048
9049         if (!pci_device_is_present(tp->pdev))
9050                 return -ENODEV;
9051
9052         tg3_nvram_lock(tp);
9053
9054         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9055
9056         /* No matching tg3_nvram_unlock() after this because
9057          * chip reset below will undo the nvram lock.
9058          */
9059         tp->nvram_lock_cnt = 0;
9060
9061         /* GRC_MISC_CFG core clock reset will clear the memory
9062          * enable bit in PCI register 4 and the MSI enable bit
9063          * on some chips, so we save relevant registers here.
9064          */
9065         tg3_save_pci_state(tp);
9066
9067         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9068             tg3_flag(tp, 5755_PLUS))
9069                 tw32(GRC_FASTBOOT_PC, 0);
9070
9071         /*
9072          * We must avoid the readl() that normally takes place.
9073          * It locks machines, causes machine checks, and other
9074          * fun things.  So, temporarily disable the 5701
9075          * hardware workaround, while we do the reset.
9076          */
9077         write_op = tp->write32;
9078         if (write_op == tg3_write_flush_reg32)
9079                 tp->write32 = tg3_write32;
9080
9081         /* Prevent the irq handler from reading or writing PCI registers
9082          * during chip reset when the memory enable bit in the PCI command
9083          * register may be cleared.  The chip does not generate interrupts
9084          * at this time, but the irq handler may still be called due to irq
9085          * sharing or irqpoll.
9086          */
9087         tg3_flag_set(tp, CHIP_RESETTING);
9088         for (i = 0; i < tp->irq_cnt; i++) {
9089                 struct tg3_napi *tnapi = &tp->napi[i];
9090                 if (tnapi->hw_status) {
9091                         tnapi->hw_status->status = 0;
9092                         tnapi->hw_status->status_tag = 0;
9093                 }
9094                 tnapi->last_tag = 0;
9095                 tnapi->last_irq_tag = 0;
9096         }
9097         smp_mb();
9098
9099         tg3_full_unlock(tp);
9100
9101         for (i = 0; i < tp->irq_cnt; i++)
9102                 synchronize_irq(tp->napi[i].irq_vec);
9103
9104         tg3_full_lock(tp, 0);
9105
9106         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9107                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9108                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9109         }
9110
9111         /* do the reset */
9112         val = GRC_MISC_CFG_CORECLK_RESET;
9113
9114         if (tg3_flag(tp, PCI_EXPRESS)) {
9115                 /* Force PCIe 1.0a mode */
9116                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9117                     !tg3_flag(tp, 57765_PLUS) &&
9118                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9119                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9120                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9121
9122                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9123                         tw32(GRC_MISC_CFG, (1 << 29));
9124                         val |= (1 << 29);
9125                 }
9126         }
9127
9128         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9129                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9130                 tw32(GRC_VCPU_EXT_CTRL,
9131                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9132         }
9133
9134         /* Set the clock to the highest frequency to avoid timeouts. With link
9135          * aware mode, the clock speed could be slow and bootcode does not
9136          * complete within the expected time. Override the clock to allow the
9137          * bootcode to finish sooner and then restore it.
9138          */
9139         tg3_override_clk(tp);
9140
9141         /* Manage gphy power for all CPMU absent PCIe devices. */
9142         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9143                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9144
9145         tw32(GRC_MISC_CFG, val);
9146
9147         /* restore 5701 hardware bug workaround write method */
9148         tp->write32 = write_op;
9149
9150         /* Unfortunately, we have to delay before the PCI read back.
9151          * Some 575X chips will not even respond to a PCI cfg access
9152          * when the reset command is given to the chip.
9153          *
9154          * How do these hardware designers expect things to work
9155          * properly if the PCI write is posted for a long period
9156          * of time?  It is always necessary to have some method by
9157          * which a register read back can occur to push the write
9158          * out which does the reset.
9159          *
9160          * For most tg3 variants the trick below was working.
9161          * Ho hum...
9162          */
9163         udelay(120);
9164
9165         /* Flush PCI posted writes.  The normal MMIO registers
9166          * are inaccessible at this time so this is the only
9167          * way to do this reliably (actually, this is no longer
9168          * the case, see above).  I tried to use indirect
9169          * register read/write but this upset some 5701 variants.
9170          */
9171         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9172
9173         udelay(120);
9174
9175         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9176                 u16 val16;
9177
9178                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9179                         int j;
9180                         u32 cfg_val;
9181
9182                         /* Wait for link training to complete.  */
9183                         for (j = 0; j < 5000; j++)
9184                                 udelay(100);
9185
9186                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9187                         pci_write_config_dword(tp->pdev, 0xc4,
9188                                                cfg_val | (1 << 15));
9189                 }
9190
9191                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9192                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9193                 /*
9194                  * Older PCIe devices only support the 128 byte
9195                  * MPS setting.  Enforce the restriction.
9196                  */
9197                 if (!tg3_flag(tp, CPMU_PRESENT))
9198                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9199                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9200
9201                 /* Clear error status */
9202                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9203                                       PCI_EXP_DEVSTA_CED |
9204                                       PCI_EXP_DEVSTA_NFED |
9205                                       PCI_EXP_DEVSTA_FED |
9206                                       PCI_EXP_DEVSTA_URD);
9207         }
9208
9209         tg3_restore_pci_state(tp);
9210
9211         tg3_flag_clear(tp, CHIP_RESETTING);
9212         tg3_flag_clear(tp, ERROR_PROCESSED);
9213
9214         val = 0;
9215         if (tg3_flag(tp, 5780_CLASS))
9216                 val = tr32(MEMARB_MODE);
9217         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9218
9219         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9220                 tg3_stop_fw(tp);
9221                 tw32(0x5000, 0x400);
9222         }
9223
9224         if (tg3_flag(tp, IS_SSB_CORE)) {
9225                 /*
9226                  * BCM4785: In order to avoid repercussions from using
9227                  * potentially defective internal ROM, stop the Rx RISC CPU,
9228                  * which is not required on this device.
9229                  */
9230                 tg3_stop_fw(tp);
9231                 tg3_halt_cpu(tp, RX_CPU_BASE);
9232         }
9233
9234         err = tg3_poll_fw(tp);
9235         if (err)
9236                 return err;
9237
9238         tw32(GRC_MODE, tp->grc_mode);
9239
9240         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9241                 val = tr32(0xc4);
9242
9243                 tw32(0xc4, val | (1 << 15));
9244         }
9245
9246         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9247             tg3_asic_rev(tp) == ASIC_REV_5705) {
9248                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9249                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9250                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9251                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9252         }
9253
9254         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9255                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9256                 val = tp->mac_mode;
9257         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9258                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9259                 val = tp->mac_mode;
9260         } else
9261                 val = 0;
9262
9263         tw32_f(MAC_MODE, val);
9264         udelay(40);
9265
9266         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9267
9268         tg3_mdio_start(tp);
9269
9270         if (tg3_flag(tp, PCI_EXPRESS) &&
9271             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9272             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9273             !tg3_flag(tp, 57765_PLUS)) {
9274                 val = tr32(0x7c00);
9275
9276                 tw32(0x7c00, val | (1 << 25));
9277         }
9278
9279         tg3_restore_clk(tp);
9280
9281         /* Reprobe ASF enable state.  */
9282         tg3_flag_clear(tp, ENABLE_ASF);
9283         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9284                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9285
9286         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9287         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9288         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9289                 u32 nic_cfg;
9290
9291                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9292                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9293                         tg3_flag_set(tp, ENABLE_ASF);
9294                         tp->last_event_jiffies = jiffies;
9295                         if (tg3_flag(tp, 5750_PLUS))
9296                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9297
9298                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9299                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9300                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9301                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9302                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9303                 }
9304         }
9305
9306         return 0;
9307 }
9308
9309 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9310 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9311 static void __tg3_set_rx_mode(struct net_device *);
9312
9313 /* tp->lock is held. */
9314 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9315 {
9316         int err;
9317
9318         tg3_stop_fw(tp);
9319
9320         tg3_write_sig_pre_reset(tp, kind);
9321
9322         tg3_abort_hw(tp, silent);
9323         err = tg3_chip_reset(tp);
9324
9325         __tg3_set_mac_addr(tp, false);
9326
9327         tg3_write_sig_legacy(tp, kind);
9328         tg3_write_sig_post_reset(tp, kind);
9329
9330         if (tp->hw_stats) {
9331                 /* Save the stats across chip resets... */
9332                 tg3_get_nstats(tp, &tp->net_stats_prev);
9333                 tg3_get_estats(tp, &tp->estats_prev);
9334
9335                 /* And make sure the next sample is new data */
9336                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9337         }
9338
9339         return err;
9340 }
9341
9342 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9343 {
9344         struct tg3 *tp = netdev_priv(dev);
9345         struct sockaddr *addr = p;
9346         int err = 0;
9347         bool skip_mac_1 = false;
9348
9349         if (!is_valid_ether_addr(addr->sa_data))
9350                 return -EADDRNOTAVAIL;
9351
9352         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9353
9354         if (!netif_running(dev))
9355                 return 0;
9356
9357         if (tg3_flag(tp, ENABLE_ASF)) {
9358                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9359
9360                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9361                 addr0_low = tr32(MAC_ADDR_0_LOW);
9362                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9363                 addr1_low = tr32(MAC_ADDR_1_LOW);
9364
9365                 /* Skip MAC addr 1 if ASF is using it. */
9366                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9367                     !(addr1_high == 0 && addr1_low == 0))
9368                         skip_mac_1 = true;
9369         }
9370         spin_lock_bh(&tp->lock);
9371         __tg3_set_mac_addr(tp, skip_mac_1);
9372         __tg3_set_rx_mode(dev);
9373         spin_unlock_bh(&tp->lock);
9374
9375         return err;
9376 }
9377
9378 /* tp->lock is held. */
9379 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9380                            dma_addr_t mapping, u32 maxlen_flags,
9381                            u32 nic_addr)
9382 {
9383         tg3_write_mem(tp,
9384                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9385                       ((u64) mapping >> 32));
9386         tg3_write_mem(tp,
9387                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9388                       ((u64) mapping & 0xffffffff));
9389         tg3_write_mem(tp,
9390                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9391                        maxlen_flags);
9392
9393         if (!tg3_flag(tp, 5705_PLUS))
9394                 tg3_write_mem(tp,
9395                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9396                               nic_addr);
9397 }
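
/* Editor's note: a TG3_BDINFO block in NIC SRAM holds a 64-bit host DMA
 * address as two 32-bit words (high word first), a maxlen/flags word and,
 * on pre-5705 parts, a NIC-local address.  The high/low split written
 * above is plain shift-and-mask; with a made-up bus address:
 */
#if 0	/* illustrative sketch only -- not part of tg3.c */
	u64 mapping = 0x0000000123456789ULL;	/* hypothetical DMA address */
	u32 hi = (u32)(mapping >> 32);		/* 0x00000001 */
	u32 lo = (u32)(mapping & 0xffffffff);	/* 0x23456789 */
#endif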
9398
9399
9400 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9401 {
9402         int i = 0;
9403
9404         if (!tg3_flag(tp, ENABLE_TSS)) {
9405                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9406                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9407                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9408         } else {
9409                 tw32(HOSTCC_TXCOL_TICKS, 0);
9410                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9411                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9412
9413                 for (; i < tp->txq_cnt; i++) {
9414                         u32 reg;
9415
9416                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9417                         tw32(reg, ec->tx_coalesce_usecs);
9418                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9419                         tw32(reg, ec->tx_max_coalesced_frames);
9420                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9421                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9422                 }
9423         }
9424
9425         for (; i < tp->irq_max - 1; i++) {
9426                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9427                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9428                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9429         }
9430 }
9431
9432 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9433 {
9434         int i = 0;
9435         u32 limit = tp->rxq_cnt;
9436
9437         if (!tg3_flag(tp, ENABLE_RSS)) {
9438                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9439                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9440                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9441                 limit--;
9442         } else {
9443                 tw32(HOSTCC_RXCOL_TICKS, 0);
9444                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9445                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9446         }
9447
9448         for (; i < limit; i++) {
9449                 u32 reg;
9450
9451                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9452                 tw32(reg, ec->rx_coalesce_usecs);
9453                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9454                 tw32(reg, ec->rx_max_coalesced_frames);
9455                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9456                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9457         }
9458
9459         for (; i < tp->irq_max - 1; i++) {
9460                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9461                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9462                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9463         }
9464 }
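
/* Editor's note: both coalescing helpers rely on the per-vector host
 * coalescing registers being laid out at a fixed 0x18-byte stride from
 * the *_VEC1 base, so slot i lives at base + i * 0x18.  For example, the
 * third per-vector rx ticks register would be written as:
 */
#if 0	/* illustrative sketch only -- not part of tg3.c */
	u32 reg = HOSTCC_RXCOL_TICKS_VEC1 + 2 * 0x18;	/* slot i == 2 */

	tw32(reg, ec->rx_coalesce_usecs);
#endif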
9465
9466 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9467 {
9468         tg3_coal_tx_init(tp, ec);
9469         tg3_coal_rx_init(tp, ec);
9470
9471         if (!tg3_flag(tp, 5705_PLUS)) {
9472                 u32 val = ec->stats_block_coalesce_usecs;
9473
9474                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9475                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9476
9477                 if (!tp->link_up)
9478                         val = 0;
9479
9480                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9481         }
9482 }
9483
9484 /* tp->lock is held. */
9485 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9486 {
9487         u32 txrcb, limit;
9488
9489         /* Disable all transmit rings but the first. */
9490         if (!tg3_flag(tp, 5705_PLUS))
9491                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9492         else if (tg3_flag(tp, 5717_PLUS))
9493                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9494         else if (tg3_flag(tp, 57765_CLASS) ||
9495                  tg3_asic_rev(tp) == ASIC_REV_5762)
9496                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9497         else
9498                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9499
9500         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9501              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9502                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9503                               BDINFO_FLAGS_DISABLED);
9504 }
9505
9506 /* tp->lock is held. */
9507 static void tg3_tx_rcbs_init(struct tg3 *tp)
9508 {
9509         int i = 0;
9510         u32 txrcb = NIC_SRAM_SEND_RCB;
9511
9512         if (tg3_flag(tp, ENABLE_TSS))
9513                 i++;
9514
9515         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9516                 struct tg3_napi *tnapi = &tp->napi[i];
9517
9518                 if (!tnapi->tx_ring)
9519                         continue;
9520
9521                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9522                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9523                                NIC_SRAM_TX_BUFFER_DESC);
9524         }
9525 }
9526
9527 /* tp->lock is held. */
9528 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9529 {
9530         u32 rxrcb, limit;
9531
9532         /* Disable all receive return rings but the first. */
9533         if (tg3_flag(tp, 5717_PLUS))
9534                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9535         else if (!tg3_flag(tp, 5705_PLUS))
9536                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9537         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9538                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9539                  tg3_flag(tp, 57765_CLASS))
9540                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9541         else
9542                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9543
9544         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9545              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9546                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9547                               BDINFO_FLAGS_DISABLED);
9548 }
9549
9550 /* tp->lock is held. */
9551 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9552 {
9553         int i = 0;
9554         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9555
9556         if (tg3_flag(tp, ENABLE_RSS))
9557                 i++;
9558
9559         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9560                 struct tg3_napi *tnapi = &tp->napi[i];
9561
9562                 if (!tnapi->rx_rcb)
9563                         continue;
9564
9565                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9566                                (tp->rx_ret_ring_mask + 1) <<
9567                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9568         }
9569 }
9570
9571 /* tp->lock is held. */
9572 static void tg3_rings_reset(struct tg3 *tp)
9573 {
9574         int i;
9575         u32 stblk;
9576         struct tg3_napi *tnapi = &tp->napi[0];
9577
9578         tg3_tx_rcbs_disable(tp);
9579
9580         tg3_rx_ret_rcbs_disable(tp);
9581
9582         /* Disable interrupts */
9583         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9584         tp->napi[0].chk_msi_cnt = 0;
9585         tp->napi[0].last_rx_cons = 0;
9586         tp->napi[0].last_tx_cons = 0;
9587
9588         /* Zero mailbox registers. */
9589         if (tg3_flag(tp, SUPPORT_MSIX)) {
9590                 for (i = 1; i < tp->irq_max; i++) {
9591                         tp->napi[i].tx_prod = 0;
9592                         tp->napi[i].tx_cons = 0;
9593                         if (tg3_flag(tp, ENABLE_TSS))
9594                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9595                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9596                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9597                         tp->napi[i].chk_msi_cnt = 0;
9598                         tp->napi[i].last_rx_cons = 0;
9599                         tp->napi[i].last_tx_cons = 0;
9600                 }
9601                 if (!tg3_flag(tp, ENABLE_TSS))
9602                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9603         } else {
9604                 tp->napi[0].tx_prod = 0;
9605                 tp->napi[0].tx_cons = 0;
9606                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9607                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9608         }
9609
9610         /* Make sure the NIC-based send BD rings are disabled. */
9611         if (!tg3_flag(tp, 5705_PLUS)) {
9612                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9613                 for (i = 0; i < 16; i++)
9614                         tw32_tx_mbox(mbox + i * 8, 0);
9615         }
9616
9617         /* Clear status block in ram. */
9618         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9619
9620         /* Set status block DMA address */
9621         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9622              ((u64) tnapi->status_mapping >> 32));
9623         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9624              ((u64) tnapi->status_mapping & 0xffffffff));
9625
9626         stblk = HOSTCC_STATBLCK_RING1;
9627
9628         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9629                 u64 mapping = (u64)tnapi->status_mapping;
9630                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9631                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9632                 stblk += 8;
9633
9634                 /* Clear status block in ram. */
9635                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9636         }
9637
9638         tg3_tx_rcbs_init(tp);
9639         tg3_rx_ret_rcbs_init(tp);
9640 }
9641
9642 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9643 {
9644         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9645
9646         if (!tg3_flag(tp, 5750_PLUS) ||
9647             tg3_flag(tp, 5780_CLASS) ||
9648             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9649             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9650             tg3_flag(tp, 57765_PLUS))
9651                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9652         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9653                  tg3_asic_rev(tp) == ASIC_REV_5787)
9654                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9655         else
9656                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9657
9658         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9659         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9660
9661         val = min(nic_rep_thresh, host_rep_thresh);
9662         tw32(RCVBDI_STD_THRESH, val);
9663
9664         if (tg3_flag(tp, 57765_PLUS))
9665                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9666
9667         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9668                 return;
9669
9670         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9671
9672         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9673
9674         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9675         tw32(RCVBDI_JUMBO_THRESH, val);
9676
9677         if (tg3_flag(tp, 57765_PLUS))
9678                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9679 }
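
/* Editor's note: the standard-ring replenish threshold is the smaller of
 * half the BD cache depth and one eighth of the configured ring size
 * (never below 1).  Worked through with hypothetical values:
 */
#if 0	/* illustrative sketch only -- not part of tg3.c */
	u32 bdcache_maxcnt = 8, rx_pending = 200, rx_std_max_post = 32;
	u32 nic_rep_thresh  = min(bdcache_maxcnt / 2, rx_std_max_post);	/* 4 */
	u32 host_rep_thresh = max_t(u32, rx_pending / 8, 1);		/* 25 */
	u32 val = min(nic_rep_thresh, host_rep_thresh);			/* 4 */
#endif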
9680
9681 static inline u32 calc_crc(unsigned char *buf, int len)
9682 {
9683         u32 reg;
9684         u32 tmp;
9685         int j, k;
9686
9687         reg = 0xffffffff;
9688
9689         for (j = 0; j < len; j++) {
9690                 reg ^= buf[j];
9691
9692                 for (k = 0; k < 8; k++) {
9693                         tmp = reg & 0x01;
9694
9695                         reg >>= 1;
9696
9697                         if (tmp)
9698                                 reg ^= 0xedb88320;
9699                 }
9700         }
9701
9702         return ~reg;
9703 }
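
/* Editor's note: calc_crc() is the standard reflected CRC-32 (polynomial
 * 0xedb88320, initial value 0xffffffff, final inversion), i.e. the same
 * function as zlib's crc32().  A quick standalone sanity check against
 * the classic test vector:
 */
#if 0	/* illustrative sketch only -- not part of tg3.c */
#include <assert.h>

int main(void)
{
	/* CRC-32 of "123456789" is the well-known check value 0xcbf43926 */
	assert(calc_crc((unsigned char *)"123456789", 9) == 0xcbf43926);
	return 0;
}
#endif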
9704
9705 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9706 {
9707         /* accept or reject all multicast frames */
9708         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9709         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9710         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9711         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9712 }
9713
9714 static void __tg3_set_rx_mode(struct net_device *dev)
9715 {
9716         struct tg3 *tp = netdev_priv(dev);
9717         u32 rx_mode;
9718
9719         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9720                                   RX_MODE_KEEP_VLAN_TAG);
9721
9722 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9723         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9724          * flag clear.
9725          */
9726         if (!tg3_flag(tp, ENABLE_ASF))
9727                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9728 #endif
9729
9730         if (dev->flags & IFF_PROMISC) {
9731                 /* Promiscuous mode. */
9732                 rx_mode |= RX_MODE_PROMISC;
9733         } else if (dev->flags & IFF_ALLMULTI) {
9734                 /* Accept all multicast. */
9735                 tg3_set_multi(tp, 1);
9736         } else if (netdev_mc_empty(dev)) {
9737                 /* Reject all multicast. */
9738                 tg3_set_multi(tp, 0);
9739         } else {
9740                 /* Accept one or more multicast(s). */
9741                 struct netdev_hw_addr *ha;
9742                 u32 mc_filter[4] = { 0, };
9743                 u32 regidx;
9744                 u32 bit;
9745                 u32 crc;
9746
9747                 netdev_for_each_mc_addr(ha, dev) {
9748                         crc = calc_crc(ha->addr, ETH_ALEN);
9749                         bit = ~crc & 0x7f;
9750                         regidx = (bit & 0x60) >> 5;
9751                         bit &= 0x1f;
9752                         mc_filter[regidx] |= (1 << bit);
9753                 }
9754
9755                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9756                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9757                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9758                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9759         }
9760
9761         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9762                 rx_mode |= RX_MODE_PROMISC;
9763         } else if (!(dev->flags & IFF_PROMISC)) {
9764                 /* Add all entries into the MAC addr filter list */
9765                 int i = 0;
9766                 struct netdev_hw_addr *ha;
9767
9768                 netdev_for_each_uc_addr(ha, dev) {
9769                         __tg3_set_one_mac_addr(tp, ha->addr,
9770                                                i + TG3_UCAST_ADDR_IDX(tp));
9771                         i++;
9772                 }
9773         }
9774
9775         if (rx_mode != tp->rx_mode) {
9776                 tp->rx_mode = rx_mode;
9777                 tw32_f(MAC_RX_MODE, rx_mode);
9778                 udelay(10);
9779         }
9780 }
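
/* Editor's note: the multicast filter hashes each address with CRC-32 and
 * uses the low 7 bits of the complemented CRC as a bit index into the four
 * 32-bit MAC_HASH registers: bits 6:5 pick the register, bits 4:0 pick the
 * bit.  Worked through once with a made-up CRC value:
 */
#if 0	/* illustrative sketch only -- not part of tg3.c */
	u32 crc = 0x12345678;			/* hypothetical calc_crc() result */
	u32 bit = ~crc & 0x7f;			/* ~crc = 0xedcba987 -> bit = 0x07 */
	u32 regidx = (bit & 0x60) >> 5;		/* 0 -> MAC_HASH_REG_0 */

	bit &= 0x1f;				/* bit 7 of that register */
	/* mc_filter[0] |= 1 << 7; */
#endif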
9781
9782 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9783 {
9784         int i;
9785
9786         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9787                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9788 }
9789
9790 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9791 {
9792         int i;
9793
9794         if (!tg3_flag(tp, SUPPORT_MSIX))
9795                 return;
9796
9797         if (tp->rxq_cnt == 1) {
9798                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9799                 return;
9800         }
9801
9802         /* Validate table against current IRQ count */
9803         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9804                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9805                         break;
9806         }
9807
9808         if (i != TG3_RSS_INDIR_TBL_SIZE)
9809                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9810 }
9811
9812 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9813 {
9814         int i = 0;
9815         u32 reg = MAC_RSS_INDIR_TBL_0;
9816
9817         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9818                 u32 val = tp->rss_ind_tbl[i];
9819                 i++;
9820                 for (; i % 8; i++) {
9821                         val <<= 4;
9822                         val |= tp->rss_ind_tbl[i];
9823                 }
9824                 tw32(reg, val);
9825                 reg += 4;
9826         }
9827 }
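
/* Editor's note: tg3_rss_write_indir_tbl() packs eight 4-bit table entries
 * into each 32-bit register, first entry in the most significant nibble
 * (assuming the tg3.h value TG3_RSS_INDIR_TBL_SIZE == 128, that is 16
 * registers starting at MAC_RSS_INDIR_TBL_0).  For example, the entries
 * {1,0,3,2,1,0,3,2} pack as:
 */
#if 0	/* illustrative sketch only -- not part of tg3.c */
	u32 val = 0x10321032;	/* nibbles 1,0,3,2,1,0,3,2, MSB first */

	tw32(MAC_RSS_INDIR_TBL_0, val);
#endif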
9828
9829 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9830 {
9831         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9832                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9833         else
9834                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9835 }
9836
9837 /* tp->lock is held. */
9838 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9839 {
9840         u32 val, rdmac_mode;
9841         int i, err, limit;
9842         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9843
9844         tg3_disable_ints(tp);
9845
9846         tg3_stop_fw(tp);
9847
9848         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9849
9850         if (tg3_flag(tp, INIT_COMPLETE))
9851                 tg3_abort_hw(tp, true);
9852
9853         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9854             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9855                 tg3_phy_pull_config(tp);
9856                 tg3_eee_pull_config(tp, NULL);
9857                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9858         }
9859
9860         /* Enable MAC control of LPI */
9861         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9862                 tg3_setup_eee(tp);
9863
9864         if (reset_phy)
9865                 tg3_phy_reset(tp);
9866
9867         err = tg3_chip_reset(tp);
9868         if (err)
9869                 return err;
9870
9871         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9872
9873         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9874                 val = tr32(TG3_CPMU_CTRL);
9875                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9876                 tw32(TG3_CPMU_CTRL, val);
9877
9878                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9879                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9880                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9881                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9882
9883                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9884                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9885                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9886                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9887
9888                 val = tr32(TG3_CPMU_HST_ACC);
9889                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9890                 val |= CPMU_HST_ACC_MACCLK_6_25;
9891                 tw32(TG3_CPMU_HST_ACC, val);
9892         }
9893
9894         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9895                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9896                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9897                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9898                 tw32(PCIE_PWR_MGMT_THRESH, val);
9899
9900                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9901                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9902
9903                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9904
9905                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9906                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9907         }
9908
9909         if (tg3_flag(tp, L1PLLPD_EN)) {
9910                 u32 grc_mode = tr32(GRC_MODE);
9911
9912                 /* Access the lower 1K of PL PCIE block registers. */
9913                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9914                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9915
9916                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9917                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9918                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9919
9920                 tw32(GRC_MODE, grc_mode);
9921         }
9922
9923         if (tg3_flag(tp, 57765_CLASS)) {
9924                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9925                         u32 grc_mode = tr32(GRC_MODE);
9926
9927                         /* Access the lower 1K of PL PCIE block registers. */
9928                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9929                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9930
9931                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9932                                    TG3_PCIE_PL_LO_PHYCTL5);
9933                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9934                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9935
9936                         tw32(GRC_MODE, grc_mode);
9937                 }
9938
9939                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9940                         u32 grc_mode;
9941
9942                         /* Fix transmit hangs */
9943                         val = tr32(TG3_CPMU_PADRNG_CTL);
9944                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9945                         tw32(TG3_CPMU_PADRNG_CTL, val);
9946
9947                         grc_mode = tr32(GRC_MODE);
9948
9949                         /* Access the lower 1K of DL PCIE block registers. */
9950                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9951                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9952
9953                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9954                                    TG3_PCIE_DL_LO_FTSMAX);
9955                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9956                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9957                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9958
9959                         tw32(GRC_MODE, grc_mode);
9960                 }
9961
9962                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9963                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9964                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9965                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9966         }
9967
9968         /* This works around an issue with Athlon chipsets on
9969          * B3 tigon3 silicon.  This bit has no effect on any
9970          * other revision.  But do not set this on PCI Express
9971          * chips and don't even touch the clocks if the CPMU is present.
9972          */
9973         if (!tg3_flag(tp, CPMU_PRESENT)) {
9974                 if (!tg3_flag(tp, PCI_EXPRESS))
9975                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9976                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9977         }
9978
9979         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9980             tg3_flag(tp, PCIX_MODE)) {
9981                 val = tr32(TG3PCI_PCISTATE);
9982                 val |= PCISTATE_RETRY_SAME_DMA;
9983                 tw32(TG3PCI_PCISTATE, val);
9984         }
9985
9986         if (tg3_flag(tp, ENABLE_APE)) {
9987                 /* Allow reads and writes to the
9988                  * APE register and memory space.
9989                  */
9990                 val = tr32(TG3PCI_PCISTATE);
9991                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9992                        PCISTATE_ALLOW_APE_SHMEM_WR |
9993                        PCISTATE_ALLOW_APE_PSPACE_WR;
9994                 tw32(TG3PCI_PCISTATE, val);
9995         }
9996
9997         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9998                 /* Enable some hw fixes.  */
9999                 val = tr32(TG3PCI_MSI_DATA);
10000                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10001                 tw32(TG3PCI_MSI_DATA, val);
10002         }
10003
10004         /* Descriptor ring init may make accesses to the
10005          * NIC SRAM area to setup the TX descriptors, so we
10006          * can only do this after the hardware has been
10007          * successfully reset.
10008          */
10009         err = tg3_init_rings(tp);
10010         if (err)
10011                 return err;
10012
10013         if (tg3_flag(tp, 57765_PLUS)) {
10014                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10015                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10016                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10017                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10018                 if (!tg3_flag(tp, 57765_CLASS) &&
10019                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10020                     tg3_asic_rev(tp) != ASIC_REV_5762)
10021                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10022                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10023         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10024                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10025                 /* This value is determined during the probe-time DMA
10026                  * engine test, tg3_test_dma.
10027                  */
10028                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10029         }
10030
10031         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10032                           GRC_MODE_4X_NIC_SEND_RINGS |
10033                           GRC_MODE_NO_TX_PHDR_CSUM |
10034                           GRC_MODE_NO_RX_PHDR_CSUM);
10035         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10036
10037         /* Pseudo-header checksum is done by hardware logic and not
10038          * the offload processors, so make the chip do the pseudo-
10039          * header checksums on receive.  For transmit it is more
10040          * convenient to do the pseudo-header checksum in software
10041          * as Linux does that on transmit for us in all cases.
10042          */
10043         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10044
10045         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10046         if (tp->rxptpctl)
10047                 tw32(TG3_RX_PTP_CTL,
10048                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10049
10050         if (tg3_flag(tp, PTP_CAPABLE))
10051                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10052
10053         tw32(GRC_MODE, tp->grc_mode | val);
10054
10055         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
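        /* Dividing the 66 MHz clock by 65 + 1 yields a 1 MHz timer tick. */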
10056         val = tr32(GRC_MISC_CFG);
10057         val &= ~0xff;
10058         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10059         tw32(GRC_MISC_CFG, val);
10060
10061         /* Initialize MBUF/DESC pool. */
10062         if (tg3_flag(tp, 5750_PLUS)) {
10063                 /* Do nothing.  */
10064         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10065                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10066                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10067                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10068                 else
10069                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10070                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10071                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10072         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10073                 int fw_len;
10074
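                /* Carve the TSO firmware image out of the front of the
                 * 5705 mbuf pool: round its length up to a 128-byte
                 * boundary, then place the pool just past it.
                 */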
10075                 fw_len = tp->fw_len;
10076                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10077                 tw32(BUFMGR_MB_POOL_ADDR,
10078                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10079                 tw32(BUFMGR_MB_POOL_SIZE,
10080                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10081         }
10082
10083         if (tp->dev->mtu <= ETH_DATA_LEN) {
10084                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10085                      tp->bufmgr_config.mbuf_read_dma_low_water);
10086                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10087                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10088                 tw32(BUFMGR_MB_HIGH_WATER,
10089                      tp->bufmgr_config.mbuf_high_water);
10090         } else {
10091                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10092                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10093                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10094                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10095                 tw32(BUFMGR_MB_HIGH_WATER,
10096                      tp->bufmgr_config.mbuf_high_water_jumbo);
10097         }
10098         tw32(BUFMGR_DMA_LOW_WATER,
10099              tp->bufmgr_config.dma_low_water);
10100         tw32(BUFMGR_DMA_HIGH_WATER,
10101              tp->bufmgr_config.dma_high_water);
10102
10103         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10104         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10105                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10106         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10107             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10108             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10109             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10110                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10111         tw32(BUFMGR_MODE, val);
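        /* Poll up to 20 ms (2000 reads, 10 us apart) for the buffer
         * manager to report that it is enabled.
         */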
10112         for (i = 0; i < 2000; i++) {
10113                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10114                         break;
10115                 udelay(10);
10116         }
10117         if (i >= 2000) {
10118                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10119                 return -ENODEV;
10120         }
10121
10122         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10123                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10124
10125         tg3_setup_rxbd_thresholds(tp);
10126
10127         /* Initialize TG3_BDINFO's at:
10128          *  RCVDBDI_STD_BD:     standard eth size rx ring
10129          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10130          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10131          *
10132          * like so:
10133          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10134          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10135          *                              ring attribute flags
10136          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10137          *
10138          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10139          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10140          *
10141          * The size of each ring is fixed in the firmware, but the location is
10142          * configurable.
10143          */
10144         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10145              ((u64) tpr->rx_std_mapping >> 32));
10146         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10147              ((u64) tpr->rx_std_mapping & 0xffffffff));
10148         if (!tg3_flag(tp, 5717_PLUS))
10149                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10150                      NIC_SRAM_RX_BUFFER_DESC);
10151
10152         /* Disable the mini ring */
10153         if (!tg3_flag(tp, 5705_PLUS))
10154                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10155                      BDINFO_FLAGS_DISABLED);
10156
10157         /* Program the jumbo buffer descriptor ring control
10158          * blocks on those devices that have them.
10159          */
10160         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10161             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10162
10163                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10164                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10165                              ((u64) tpr->rx_jmb_mapping >> 32));
10166                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10167                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10168                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10169                               BDINFO_FLAGS_MAXLEN_SHIFT;
10170                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10171                              val | BDINFO_FLAGS_USE_EXT_RECV);
10172                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10173                             tg3_flag(tp, 57765_CLASS) ||
10174                             tg3_asic_rev(tp) == ASIC_REV_5762)
10175                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10176                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10177                 } else {
10178                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10179                              BDINFO_FLAGS_DISABLED);
10180                 }
10181
10182                 if (tg3_flag(tp, 57765_PLUS)) {
10183                         val = TG3_RX_STD_RING_SIZE(tp);
10184                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10185                         val |= (TG3_RX_STD_DMA_SZ << 2);
10186                 } else
10187                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10188         } else
10189                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10190
10191         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10192
10193         tpr->rx_std_prod_idx = tp->rx_pending;
10194         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10195
10196         tpr->rx_jmb_prod_idx =
10197                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10198         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10199
10200         tg3_rings_reset(tp);
10201
10202         /* Initialize MAC address and backoff seed. */
10203         __tg3_set_mac_addr(tp, false);
10204
10205         /* MTU + ethernet header + FCS + optional VLAN tag */
10206         tw32(MAC_RX_MTU_SIZE,
10207              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10208
10209         /* The slot time is changed by tg3_setup_phy if we
10210          * run at gigabit with half duplex.
10211          */
10212         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10213               (6 << TX_LENGTHS_IPG_SHIFT) |
10214               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10215
10216         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10217             tg3_asic_rev(tp) == ASIC_REV_5762)
10218                 val |= tr32(MAC_TX_LENGTHS) &
10219                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10220                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10221
10222         tw32(MAC_TX_LENGTHS, val);
10223
10224         /* Receive rules. */
10225         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10226         tw32(RCVLPC_CONFIG, 0x0181);
10227
10228         /* Calculate RDMAC_MODE setting early, we need it to determine
10229          * the RCVLPC_STATE_ENABLE mask.
10230          */
10231         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10232                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10233                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10234                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10235                       RDMAC_MODE_LNGREAD_ENAB);
10236
10237         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10238                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10239
10240         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10241             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10242             tg3_asic_rev(tp) == ASIC_REV_57780)
10243                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10244                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10245                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10246
10247         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10248             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10249                 if (tg3_flag(tp, TSO_CAPABLE) &&
10250                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10251                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10252                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10253                            !tg3_flag(tp, IS_5788)) {
10254                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10255                 }
10256         }
10257
10258         if (tg3_flag(tp, PCI_EXPRESS))
10259                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10260
10261         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10262                 tp->dma_limit = 0;
10263                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10264                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10265                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10266                 }
10267         }
10268
10269         if (tg3_flag(tp, HW_TSO_1) ||
10270             tg3_flag(tp, HW_TSO_2) ||
10271             tg3_flag(tp, HW_TSO_3))
10272                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10273
10274         if (tg3_flag(tp, 57765_PLUS) ||
10275             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10276             tg3_asic_rev(tp) == ASIC_REV_57780)
10277                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10278
10279         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10280             tg3_asic_rev(tp) == ASIC_REV_5762)
10281                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10282
10283         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10284             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10285             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10286             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10287             tg3_flag(tp, 57765_PLUS)) {
10288                 u32 tgtreg;
10289
10290                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10291                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10292                 else
10293                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10294
10295                 val = tr32(tgtreg);
10296                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10297                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10298                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10299                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10300                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10301                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10302                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10303                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10304                 }
10305                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10306         }
10307
10308         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10309             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10310             tg3_asic_rev(tp) == ASIC_REV_5762) {
10311                 u32 tgtreg;
10312
10313                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10314                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10315                 else
10316                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10317
10318                 val = tr32(tgtreg);
10319                 tw32(tgtreg, val |
10320                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10321                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10322         }
10323
10324         /* Receive/send statistics. */
10325         if (tg3_flag(tp, 5750_PLUS)) {
10326                 val = tr32(RCVLPC_STATS_ENABLE);
10327                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10328                 tw32(RCVLPC_STATS_ENABLE, val);
10329         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10330                    tg3_flag(tp, TSO_CAPABLE)) {
10331                 val = tr32(RCVLPC_STATS_ENABLE);
10332                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10333                 tw32(RCVLPC_STATS_ENABLE, val);
10334         } else {
10335                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10336         }
10337         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10338         tw32(SNDDATAI_STATSENAB, 0xffffff);
10339         tw32(SNDDATAI_STATSCTRL,
10340              (SNDDATAI_SCTRL_ENABLE |
10341               SNDDATAI_SCTRL_FASTUPD));
10342
10343         /* Set up the host coalescing engine. */
10344         tw32(HOSTCC_MODE, 0);
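        /* Wait up to 20 ms for the coalescing engine to go idle before
         * reprogramming it.
         */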
10345         for (i = 0; i < 2000; i++) {
10346                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10347                         break;
10348                 udelay(10);
10349         }
10350
10351         __tg3_set_coalesce(tp, &tp->coal);
10352
10353         if (!tg3_flag(tp, 5705_PLUS)) {
10354                 /* Status/statistics block address.  See tg3_timer,
10355                  * the tg3_periodic_fetch_stats call there, and
10356                  * tg3_get_stats to see how this works for 5705/5750 chips.
10357                  */
10358                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10359                      ((u64) tp->stats_mapping >> 32));
10360                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10361                      ((u64) tp->stats_mapping & 0xffffffff));
10362                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10363
10364                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10365
10366                 /* Clear statistics and status block memory areas */
10367                 for (i = NIC_SRAM_STATS_BLK;
10368                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10369                      i += sizeof(u32)) {
10370                         tg3_write_mem(tp, i, 0);
10371                         udelay(40);
10372                 }
10373         }
10374
10375         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10376
10377         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10378         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10379         if (!tg3_flag(tp, 5705_PLUS))
10380                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10381
10382         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10383                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10384                 /* Reset to prevent intermittently losing the first rx packet. */
10385                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10386                 udelay(10);
10387         }
10388
10389         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10390                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10391                         MAC_MODE_FHDE_ENABLE;
10392         if (tg3_flag(tp, ENABLE_APE))
10393                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10394         if (!tg3_flag(tp, 5705_PLUS) &&
10395             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10396             tg3_asic_rev(tp) != ASIC_REV_5700)
10397                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10398         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10399         udelay(40);
10400
10401         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10402          * If TG3_FLAG_IS_NIC is zero, we should read the
10403          * register to preserve the GPIO settings for LOMs. The GPIOs,
10404          * whether used as inputs or outputs, are set by boot code after
10405          * reset.
10406          */
10407         if (!tg3_flag(tp, IS_NIC)) {
10408                 u32 gpio_mask;
10409
10410                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10411                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10412                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10413
10414                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10415                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10416                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10417
10418                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10419                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10420
10421                 tp->grc_local_ctrl &= ~gpio_mask;
10422                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10423
10424                 /* GPIO1 must be driven high for eeprom write protect */
10425                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10426                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10427                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10428         }
10429         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10430         udelay(100);
10431
10432         if (tg3_flag(tp, USING_MSIX)) {
10433                 val = tr32(MSGINT_MODE);
10434                 val |= MSGINT_MODE_ENABLE;
10435                 if (tp->irq_cnt > 1)
10436                         val |= MSGINT_MODE_MULTIVEC_EN;
10437                 if (!tg3_flag(tp, 1SHOT_MSI))
10438                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10439                 tw32(MSGINT_MODE, val);
10440         }
10441
10442         if (!tg3_flag(tp, 5705_PLUS)) {
10443                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10444                 udelay(40);
10445         }
10446
10447         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10448                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10449                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10450                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10451                WDMAC_MODE_LNGREAD_ENAB);
10452
10453         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10454             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10455                 if (tg3_flag(tp, TSO_CAPABLE) &&
10456                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10457                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10458                         /* nothing */
10459                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10460                            !tg3_flag(tp, IS_5788)) {
10461                         val |= WDMAC_MODE_RX_ACCEL;
10462                 }
10463         }
10464
10465         /* Enable host coalescing bug fix */
10466         if (tg3_flag(tp, 5755_PLUS))
10467                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10468
10469         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10470                 val |= WDMAC_MODE_BURST_ALL_DATA;
10471
10472         tw32_f(WDMAC_MODE, val);
10473         udelay(40);
10474
10475         if (tg3_flag(tp, PCIX_MODE)) {
10476                 u16 pcix_cmd;
10477
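                /* Cap the PCI-X maximum memory read byte count at 2K.
                 * The 5704 additionally gets its split transaction
                 * limit cleared.
                 */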
10478                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10479                                      &pcix_cmd);
10480                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10481                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10482                         pcix_cmd |= PCI_X_CMD_READ_2K;
10483                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10484                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10485                         pcix_cmd |= PCI_X_CMD_READ_2K;
10486                 }
10487                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10488                                       pcix_cmd);
10489         }
10490
10491         tw32_f(RDMAC_MODE, rdmac_mode);
10492         udelay(40);
10493
10494         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10495             tg3_asic_rev(tp) == ASIC_REV_5720) {
10496                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10497                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10498                                 break;
10499                 }
10500                 if (i < TG3_NUM_RDMA_CHANNELS) {
10501                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10502                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10503                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10504                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10505                 }
10506         }
10507
10508         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10509         if (!tg3_flag(tp, 5705_PLUS))
10510                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10511
10512         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10513                 tw32(SNDDATAC_MODE,
10514                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10515         else
10516                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10517
10518         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10519         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10520         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10521         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10522                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10523         tw32(RCVDBDI_MODE, val);
10524         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10525         if (tg3_flag(tp, HW_TSO_1) ||
10526             tg3_flag(tp, HW_TSO_2) ||
10527             tg3_flag(tp, HW_TSO_3))
10528                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10529         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10530         if (tg3_flag(tp, ENABLE_TSS))
10531                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10532         tw32(SNDBDI_MODE, val);
10533         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10534
10535         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10536                 err = tg3_load_5701_a0_firmware_fix(tp);
10537                 if (err)
10538                         return err;
10539         }
10540
10541         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10542                 /* Ignore any errors for the firmware download. If download
10543                  * fails, the device will operate with EEE disabled.
10544                  */
10545                 tg3_load_57766_firmware(tp);
10546         }
10547
10548         if (tg3_flag(tp, TSO_CAPABLE)) {
10549                 err = tg3_load_tso_firmware(tp);
10550                 if (err)
10551                         return err;
10552         }
10553
10554         tp->tx_mode = TX_MODE_ENABLE;
10555
10556         if (tg3_flag(tp, 5755_PLUS) ||
10557             tg3_asic_rev(tp) == ASIC_REV_5906)
10558                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10559
10560         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10561             tg3_asic_rev(tp) == ASIC_REV_5762) {
10562                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10563                 tp->tx_mode &= ~val;
10564                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10565         }
10566
10567         tw32_f(MAC_TX_MODE, tp->tx_mode);
10568         udelay(100);
10569
10570         if (tg3_flag(tp, ENABLE_RSS)) {
10571                 u32 rss_key[10];
10572
10573                 tg3_rss_write_indir_tbl(tp);
10574
10575                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10576
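                /* Program the 40-byte RSS hash key (10 32-bit words)
                 * into the MAC, one register at a time.
                 */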
10577                 for (i = 0; i < 10; i++)
10578                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10579         }
10580
10581         tp->rx_mode = RX_MODE_ENABLE;
10582         if (tg3_flag(tp, 5755_PLUS))
10583                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10584
10585         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10586                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10587
10588         if (tg3_flag(tp, ENABLE_RSS))
10589                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10590                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10591                                RX_MODE_RSS_IPV6_HASH_EN |
10592                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10593                                RX_MODE_RSS_IPV4_HASH_EN |
10594                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10595
10596         tw32_f(MAC_RX_MODE, tp->rx_mode);
10597         udelay(10);
10598
10599         tw32(MAC_LED_CTRL, tp->led_ctrl);
10600
10601         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10602         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10603                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10604                 udelay(10);
10605         }
10606         tw32_f(MAC_RX_MODE, tp->rx_mode);
10607         udelay(10);
10608
10609         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10610                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10611                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10612                         /* Set drive transmission level to 1.2V, but only
10613                          * if the signal pre-emphasis bit is not set. */
10614                         val = tr32(MAC_SERDES_CFG);
10615                         val &= 0xfffff000;
10616                         val |= 0x880;
10617                         tw32(MAC_SERDES_CFG, val);
10618                 }
10619                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10620                         tw32(MAC_SERDES_CFG, 0x616000);
10621         }
10622
10623         /* Prevent chip from dropping frames when flow control
10624          * is enabled.
10625          */
10626         if (tg3_flag(tp, 57765_CLASS))
10627                 val = 1;
10628         else
10629                 val = 2;
10630         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10631
10632         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10633             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10634                 /* Use hardware link auto-negotiation */
10635                 tg3_flag_set(tp, HW_AUTONEG);
10636         }
10637
10638         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10639             tg3_asic_rev(tp) == ASIC_REV_5714) {
10640                 u32 tmp;
10641
10642                 tmp = tr32(SERDES_RX_CTRL);
10643                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10644                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10645                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10646                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10647         }
10648
10649         if (!tg3_flag(tp, USE_PHYLIB)) {
10650                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10651                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10652
10653                 err = tg3_setup_phy(tp, false);
10654                 if (err)
10655                         return err;
10656
10657                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10658                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10659                         u32 tmp;
10660
10661                         /* Clear CRC stats. */
10662                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10663                                 tg3_writephy(tp, MII_TG3_TEST1,
10664                                              tmp | MII_TG3_TEST1_CRC_EN);
10665                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10666                         }
10667                 }
10668         }
10669
10670         __tg3_set_rx_mode(tp->dev);
10671
10672         /* Initialize receive rules. */
10673         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10674         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10675         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10676         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10677
10678         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10679                 limit = 8;
10680         else
10681                 limit = 16;
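        /* When ASF firmware is managing the device, leave it the last
         * four receive rules.
         */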
10682         if (tg3_flag(tp, ENABLE_ASF))
10683                 limit -= 4;
10684         switch (limit) {
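        /* Every case intentionally falls through, clearing each rule
         * from the computed limit on down.
         */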
10685         case 16:
10686                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10687         case 15:
10688                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10689         case 14:
10690                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10691         case 13:
10692                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10693         case 12:
10694                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10695         case 11:
10696                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10697         case 10:
10698                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10699         case 9:
10700                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10701         case 8:
10702                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10703         case 7:
10704                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10705         case 6:
10706                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10707         case 5:
10708                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10709         case 4:
10710                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10711         case 3:
10712                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10713         case 2:
10714         case 1:
10715
10716         default:
10717                 break;
10718         }
10719
10720         if (tg3_flag(tp, ENABLE_APE))
10721                 /* Write our heartbeat update interval to APE. */
10722                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10723                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10724
10725         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10726
10727         return 0;
10728 }
10729
10730 /* Called at device open time to get the chip ready for
10731  * packet processing.  Invoked with tp->lock held.
10732  */
10733 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10734 {
10735         /* Chip may have been just powered on. If so, the boot code may still
10736          * be running initialization. Wait for it to finish to avoid races in
10737          * accessing the hardware.
10738          */
10739         tg3_enable_register_access(tp);
10740         tg3_poll_fw(tp);
10741
10742         tg3_switch_clocks(tp);
10743
10744         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10745
10746         return tg3_reset_hw(tp, reset_phy);
10747 }
10748
10749 #ifdef CONFIG_TIGON3_HWMON
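/* Read each OCIR sensor record out of the APE scratchpad, zeroing any
 * record whose signature is missing or that is not marked active.
 */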
10750 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10751 {
10752         int i;
10753
10754         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10755                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10756
10757                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10758                 off += len;
10759
10760                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10761                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10762                         memset(ocir, 0, TG3_OCIR_LEN);
10763         }
10764 }
10765
10766 /* sysfs attributes for hwmon */
10767 static ssize_t tg3_show_temp(struct device *dev,
10768                              struct device_attribute *devattr, char *buf)
10769 {
10770         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10771         struct tg3 *tp = dev_get_drvdata(dev);
10772         u32 temperature;
10773
10774         spin_lock_bh(&tp->lock);
10775         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10776                                 sizeof(temperature));
10777         spin_unlock_bh(&tp->lock);
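        /* hwmon expects temperatures in millidegrees Celsius; the APE
         * reports whole degrees, hence the multiply by 1000.
         */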
10778         return sprintf(buf, "%u\n", temperature * 1000);
10779 }
10780
10781
10782 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10783                           TG3_TEMP_SENSOR_OFFSET);
10784 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10785                           TG3_TEMP_CAUTION_OFFSET);
10786 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10787                           TG3_TEMP_MAX_OFFSET);
10788
10789 static struct attribute *tg3_attrs[] = {
10790         &sensor_dev_attr_temp1_input.dev_attr.attr,
10791         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10792         &sensor_dev_attr_temp1_max.dev_attr.attr,
10793         NULL
10794 };
10795 ATTRIBUTE_GROUPS(tg3);
10796
10797 static void tg3_hwmon_close(struct tg3 *tp)
10798 {
10799         if (tp->hwmon_dev) {
10800                 hwmon_device_unregister(tp->hwmon_dev);
10801                 tp->hwmon_dev = NULL;
10802         }
10803 }
10804
10805 static void tg3_hwmon_open(struct tg3 *tp)
10806 {
10807         int i;
10808         u32 size = 0;
10809         struct pci_dev *pdev = tp->pdev;
10810         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10811
10812         tg3_sd_scan_scratchpad(tp, ocirs);
10813
10814         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10815                 if (!ocirs[i].src_data_length)
10816                         continue;
10817
10818                 size += ocirs[i].src_hdr_length;
10819                 size += ocirs[i].src_data_length;
10820         }
10821
10822         if (!size)
10823                 return;
10824
10825         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10826                                                           tp, tg3_groups);
10827         if (IS_ERR(tp->hwmon_dev)) {
10828                 tp->hwmon_dev = NULL;
10829                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10830         }
10831 }
10832 #else
10833 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10834 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10835 #endif /* CONFIG_TIGON3_HWMON */
10836
10837
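/* Fold a 32-bit rolling hardware counter into a 64-bit software counter
 * kept as a high/low pair.  A wrap of the low word shows up as the new
 * sum being smaller than the value just added, in which case a carry is
 * propagated into the high word.
 */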
10838 #define TG3_STAT_ADD32(PSTAT, REG) \
10839 do {    u32 __val = tr32(REG); \
10840         (PSTAT)->low += __val; \
10841         if ((PSTAT)->low < __val) \
10842                 (PSTAT)->high += 1; \
10843 } while (0)
10844
10845 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10846 {
10847         struct tg3_hw_stats *sp = tp->hw_stats;
10848
10849         if (!tp->link_up)
10850                 return;
10851
10852         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10853         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10854         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10855         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10856         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10857         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10858         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10859         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10860         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10861         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10862         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10863         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10864         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10865         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10866                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10867                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10868                 u32 val;
10869
10870                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10871                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10872                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10873                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10874         }
10875
10876         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10877         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10878         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10879         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10880         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10881         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10882         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10883         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10884         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10885         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10886         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10887         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10888         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10889         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10890
10891         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10892         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10893             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10894             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10895             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10896                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10897         } else {
10898                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10899                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10900                 if (val) {
10901                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10902                         sp->rx_discards.low += val;
10903                         if (sp->rx_discards.low < val)
10904                                 sp->rx_discards.high += 1;
10905                 }
10906                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10907         }
10908         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10909 }
10910
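/* Work around chips that can drop an MSI: if a vector still has work
 * pending but its consumer indices have not advanced since the last
 * timer tick, assume the interrupt was lost and call the MSI handler
 * directly.  chk_msi_cnt grants one tick of grace first.
 */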
10911 static void tg3_chk_missed_msi(struct tg3 *tp)
10912 {
10913         u32 i;
10914
10915         for (i = 0; i < tp->irq_cnt; i++) {
10916                 struct tg3_napi *tnapi = &tp->napi[i];
10917
10918                 if (tg3_has_work(tnapi)) {
10919                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10920                             tnapi->last_tx_cons == tnapi->tx_cons) {
10921                                 if (tnapi->chk_msi_cnt < 1) {
10922                                         tnapi->chk_msi_cnt++;
10923                                         return;
10924                                 }
10925                                 tg3_msi(0, tnapi);
10926                         }
10927                 }
10928                 tnapi->chk_msi_cnt = 0;
10929                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10930                 tnapi->last_tx_cons = tnapi->tx_cons;
10931         }
10932 }
10933
10934 static void tg3_timer(struct timer_list *t)
10935 {
10936         struct tg3 *tp = from_timer(tp, t, timer);
10937
10938         spin_lock(&tp->lock);
10939
10940         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10941                 spin_unlock(&tp->lock);
10942                 goto restart_timer;
10943         }
10944
10945         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10946             tg3_flag(tp, 57765_CLASS))
10947                 tg3_chk_missed_msi(tp);
10948
10949         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10950                 /* BCM4785: Flush posted writes from GbE to host memory. */
10951                 tr32(HOSTCC_MODE);
10952         }
10953
10954         if (!tg3_flag(tp, TAGGED_STATUS)) {
10955                 /* All of this garbage is needed because, with
10956                  * non-tagged IRQ status, the mailbox/status_block
10957                  * protocol the chip uses with the CPU is race prone.
10958                  */
10959                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10960                         tw32(GRC_LOCAL_CTRL,
10961                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10962                 } else {
10963                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10964                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10965                 }
10966
10967                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10968                         spin_unlock(&tp->lock);
10969                         tg3_reset_task_schedule(tp);
10970                         goto restart_timer;
10971                 }
10972         }
10973
10974         /* This part only runs once per second. */
10975         if (!--tp->timer_counter) {
10976                 if (tg3_flag(tp, 5705_PLUS))
10977                         tg3_periodic_fetch_stats(tp);
10978
10979                 if (tp->setlpicnt && !--tp->setlpicnt)
10980                         tg3_phy_eee_enable(tp);
10981
10982                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10983                         u32 mac_stat;
10984                         int phy_event;
10985
10986                         mac_stat = tr32(MAC_STATUS);
10987
10988                         phy_event = 0;
10989                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10990                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10991                                         phy_event = 1;
10992                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10993                                 phy_event = 1;
10994
10995                         if (phy_event)
10996                                 tg3_setup_phy(tp, false);
10997                 } else if (tg3_flag(tp, POLL_SERDES)) {
10998                         u32 mac_stat = tr32(MAC_STATUS);
10999                         int need_setup = 0;
11000
11001                         if (tp->link_up &&
11002                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11003                                 need_setup = 1;
11004                         }
11005                         if (!tp->link_up &&
11006                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11007                                          MAC_STATUS_SIGNAL_DET))) {
11008                                 need_setup = 1;
11009                         }
11010                         if (need_setup) {
11011                                 if (!tp->serdes_counter) {
11012                                         tw32_f(MAC_MODE,
11013                                              (tp->mac_mode &
11014                                               ~MAC_MODE_PORT_MODE_MASK));
11015                                         udelay(40);
11016                                         tw32_f(MAC_MODE, tp->mac_mode);
11017                                         udelay(40);
11018                                 }
11019                                 tg3_setup_phy(tp, false);
11020                         }
11021                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11022                            tg3_flag(tp, 5780_CLASS)) {
11023                         tg3_serdes_parallel_detect(tp);
11024                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11025                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11026                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11027                                          TG3_CPMU_STATUS_LINK_MASK);
11028
11029                         if (link_up != tp->link_up)
11030                                 tg3_setup_phy(tp, false);
11031                 }
11032
11033                 tp->timer_counter = tp->timer_multiplier;
11034         }
11035
11036         /* Heartbeat is only sent once every 2 seconds.
11037          *
11038          * The heartbeat is to tell the ASF firmware that the host
11039          * driver is still alive.  In the event that the OS crashes,
11040          * ASF needs to reset the hardware to free up the FIFO space
11041          * that may be filled with rx packets destined for the host.
11042          * If the FIFO is full, ASF will no longer function properly.
11043          *
11044          * Unintended resets have been reported on real time kernels
11045          * where the timer doesn't run on time.  Netpoll will also have
11046          * the same problem.
11047          *
11048          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11049          * to check the ring condition when the heartbeat is expiring
11050          * before doing the reset.  This will prevent most unintended
11051          * resets.
11052          */
11053         if (!--tp->asf_counter) {
11054                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11055                         tg3_wait_for_event_ack(tp);
11056
11057                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11058                                       FWCMD_NICDRV_ALIVE3);
11059                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11060                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11061                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11062
11063                         tg3_generate_fw_event(tp);
11064                 }
11065                 tp->asf_counter = tp->asf_multiplier;
11066         }
11067
11068         spin_unlock(&tp->lock);
11069
11070 restart_timer:
11071         tp->timer.expires = jiffies + tp->timer_offset;
11072         add_timer(&tp->timer);
11073 }
11074
11075 static void tg3_timer_init(struct tg3 *tp)
11076 {
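        /* Tagged status allows a slow one-second poll, except on 5717
         * and 57765-class chips, which keep the 100 ms poll for the
         * missed-MSI check; non-tagged status always polls at 100 ms.
         */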
11077         if (tg3_flag(tp, TAGGED_STATUS) &&
11078             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11079             !tg3_flag(tp, 57765_CLASS))
11080                 tp->timer_offset = HZ;
11081         else
11082                 tp->timer_offset = HZ / 10;
11083
11084         BUG_ON(tp->timer_offset > HZ);
11085
11086         tp->timer_multiplier = (HZ / tp->timer_offset);
11087         tp->asf_multiplier = (HZ / tp->timer_offset) *
11088                              TG3_FW_UPDATE_FREQ_SEC;
11089
11090         timer_setup(&tp->timer, tg3_timer, 0);
11091 }
11092
11093 static void tg3_timer_start(struct tg3 *tp)
11094 {
11095         tp->asf_counter   = tp->asf_multiplier;
11096         tp->timer_counter = tp->timer_multiplier;
11097
11098         tp->timer.expires = jiffies + tp->timer_offset;
11099         add_timer(&tp->timer);
11100 }
11101
11102 static void tg3_timer_stop(struct tg3 *tp)
11103 {
11104         del_timer_sync(&tp->timer);
11105 }
11106
11107 /* Restart hardware after configuration changes, self-test, etc.
11108  * Invoked with tp->lock held.
11109  */
11110 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11111         __releases(tp->lock)
11112         __acquires(tp->lock)
11113 {
11114         int err;
11115
11116         err = tg3_init_hw(tp, reset_phy);
11117         if (err) {
11118                 netdev_err(tp->dev,
11119                            "Failed to re-initialize device, aborting\n");
11120                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11121                 tg3_full_unlock(tp);
11122                 tg3_timer_stop(tp);
11123                 tp->irq_sync = 0;
11124                 tg3_napi_enable(tp);
11125                 dev_close(tp->dev);
11126                 tg3_full_lock(tp, 0);
11127         }
11128         return err;
11129 }
11130
11131 static void tg3_reset_task(struct work_struct *work)
11132 {
11133         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11134         int err;
11135
11136         rtnl_lock();
11137         tg3_full_lock(tp, 0);
11138
11139         if (!netif_running(tp->dev)) {
11140                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11141                 tg3_full_unlock(tp);
11142                 rtnl_unlock();
11143                 return;
11144         }
11145
11146         tg3_full_unlock(tp);
11147
11148         tg3_phy_stop(tp);
11149
11150         tg3_netif_stop(tp);
11151
11152         tg3_full_lock(tp, 1);
11153
11154         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11155                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11156                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11157                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11158                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11159         }
11160
11161         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11162         err = tg3_init_hw(tp, true);
11163         if (err)
11164                 goto out;
11165
11166         tg3_netif_start(tp);
11167
11168 out:
11169         tg3_full_unlock(tp);
11170
11171         if (!err)
11172                 tg3_phy_start(tp);
11173
11174         tg3_flag_clear(tp, RESET_TASK_PENDING);
11175         rtnl_unlock();
11176 }
11177
11178 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11179 {
11180         irq_handler_t fn;
11181         unsigned long flags;
11182         char *name;
11183         struct tg3_napi *tnapi = &tp->napi[irq_num];
11184
11185         if (tp->irq_cnt == 1)
11186                 name = tp->dev->name;
11187         else {
11188                 name = &tnapi->irq_lbl[0];
11189                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11190                         snprintf(name, IFNAMSIZ,
11191                                  "%s-txrx-%d", tp->dev->name, irq_num);
11192                 else if (tnapi->tx_buffers)
11193                         snprintf(name, IFNAMSIZ,
11194                                  "%s-tx-%d", tp->dev->name, irq_num);
11195                 else if (tnapi->rx_rcb)
11196                         snprintf(name, IFNAMSIZ,
11197                                  "%s-rx-%d", tp->dev->name, irq_num);
11198                 else
11199                         snprintf(name, IFNAMSIZ,
11200                                  "%s-%d", tp->dev->name, irq_num);
11201                 name[IFNAMSIZ-1] = 0;
11202         }
11203
11204         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11205                 fn = tg3_msi;
11206                 if (tg3_flag(tp, 1SHOT_MSI))
11207                         fn = tg3_msi_1shot;
11208                 flags = 0;
11209         } else {
11210                 fn = tg3_interrupt;
11211                 if (tg3_flag(tp, TAGGED_STATUS))
11212                         fn = tg3_interrupt_tagged;
11213                 flags = IRQF_SHARED;
11214         }
11215
11216         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11217 }
11218
11219 static int tg3_test_interrupt(struct tg3 *tp)
11220 {
11221         struct tg3_napi *tnapi = &tp->napi[0];
11222         struct net_device *dev = tp->dev;
11223         int err, i, intr_ok = 0;
11224         u32 val;
11225
11226         if (!netif_running(dev))
11227                 return -ENODEV;
11228
11229         tg3_disable_ints(tp);
11230
11231         free_irq(tnapi->irq_vec, tnapi);
11232
11233         /*
11234          * Turn off MSI one-shot mode.  Otherwise this test has no
11235          * observable way to know whether the interrupt was delivered.
11236          */
11237         if (tg3_flag(tp, 57765_PLUS)) {
11238                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11239                 tw32(MSGINT_MODE, val);
11240         }
11241
11242         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11243                           IRQF_SHARED, dev->name, tnapi);
11244         if (err)
11245                 return err;
11246
11247         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11248         tg3_enable_ints(tp);
11249
11250         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11251                tnapi->coal_now);
11252
11253         for (i = 0; i < 5; i++) {
11254                 u32 int_mbox, misc_host_ctrl;
11255
11256                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11257                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11258
11259                 if ((int_mbox != 0) ||
11260                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11261                         intr_ok = 1;
11262                         break;
11263                 }
11264
11265                 if (tg3_flag(tp, 57765_PLUS) &&
11266                     tnapi->hw_status->status_tag != tnapi->last_tag)
11267                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11268
11269                 msleep(10);
11270         }
11271
11272         tg3_disable_ints(tp);
11273
11274         free_irq(tnapi->irq_vec, tnapi);
11275
11276         err = tg3_request_irq(tp, 0);
11277
11278         if (err)
11279                 return err;
11280
11281         if (intr_ok) {
11282                 /* Reenable MSI one shot mode. */
11283                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11284                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11285                         tw32(MSGINT_MODE, val);
11286                 }
11287                 return 0;
11288         }
11289
11290         return -EIO;
11291 }
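/* Illustrative note (not in the original source): the polling loop above
 * gives the test interrupt up to roughly 5 * 10 ms = 50 ms to arrive,
 * declaring success as soon as the interrupt mailbox goes non-zero or
 * MISC_HOST_CTRL reports the PCI interrupt masked by the hardware.
 */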
11292
11293 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and INTx
11294  * mode is successfully restored.
11295  */
11296 static int tg3_test_msi(struct tg3 *tp)
11297 {
11298         int err;
11299         u16 pci_cmd;
11300
11301         if (!tg3_flag(tp, USING_MSI))
11302                 return 0;
11303
11304         /* Turn off SERR reporting in case MSI terminates with Master
11305          * Abort.
11306          */
11307         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11308         pci_write_config_word(tp->pdev, PCI_COMMAND,
11309                               pci_cmd & ~PCI_COMMAND_SERR);
11310
11311         err = tg3_test_interrupt(tp);
11312
11313         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11314
11315         if (!err)
11316                 return 0;
11317
11318         /* other failures */
11319         if (err != -EIO)
11320                 return err;
11321
11322         /* MSI test failed, go back to INTx mode */
11323         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11324                     "to INTx mode. Please report this failure to the PCI "
11325                     "maintainer and include system chipset information\n");
11326
11327         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11328
11329         pci_disable_msi(tp->pdev);
11330
11331         tg3_flag_clear(tp, USING_MSI);
11332         tp->napi[0].irq_vec = tp->pdev->irq;
11333
11334         err = tg3_request_irq(tp, 0);
11335         if (err)
11336                 return err;
11337
11338         /* Need to reset the chip because the MSI cycle may have terminated
11339          * with Master Abort.
11340          */
11341         tg3_full_lock(tp, 1);
11342
11343         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11344         err = tg3_init_hw(tp, true);
11345
11346         tg3_full_unlock(tp);
11347
11348         if (err)
11349                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11350
11351         return err;
11352 }
11353
11354 static int tg3_request_firmware(struct tg3 *tp)
11355 {
11356         const struct tg3_firmware_hdr *fw_hdr;
11357
11358         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11359                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11360                            tp->fw_needed);
11361                 return -ENOENT;
11362         }
11363
11364         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11365
11366         /* Firmware blob starts with version numbers, followed by
11367          * start address and _full_ length including BSS sections
11368          * (which must be no shorter than the actual data, of course).
11369          */
11370
11371         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11372         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11373                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11374                            tp->fw_len, tp->fw_needed);
11375                 release_firmware(tp->fw);
11376                 tp->fw = NULL;
11377                 return -EINVAL;
11378         }
11379
11380         /* We no longer need firmware; we have it. */
11381         tp->fw_needed = NULL;
11382         return 0;
11383 }
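/* Illustrative sketch (not in the original source): the header parsed
 * above is declared in tg3.h roughly as three big-endian words:
 *
 *      struct tg3_firmware_hdr {
 *              __be32 version;         (unused for fragments)
 *              __be32 base_addr;       (load address on the NIC)
 *              __be32 len;             (full image length, BSS included)
 *      };
 *
 * Worked example of the sanity check: for a 0x80c-byte blob with a
 * 12-byte header the payload is 0x800 bytes, so fw_hdr->len must be at
 * least 0x800 (it may exceed it, since BSS is counted but not stored);
 * anything smaller trips the "bogus length" error above.
 */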
11384
11385 static u32 tg3_irq_count(struct tg3 *tp)
11386 {
11387         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11388
11389         if (irq_cnt > 1) {
11390                 /* We want as many rx rings enabled as there are cpus.
11391                  * In multiqueue MSI-X mode, the first MSI-X vector
11392                  * only deals with link interrupts, etc., so we add
11393                  * one to the number of vectors we are requesting.
11394                  */
11395                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11396         }
11397
11398         return irq_cnt;
11399 }
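/* Illustrative note (not in the original source): on a 4-CPU box with
 * rxq_cnt = 4 and the default single TX queue, irq_cnt starts at
 * max(4, 1) = 4 and is then bumped to min(4 + 1, irq_max), reserving
 * vector 0 for link and error events while vectors 1..4 service the
 * rings.
 */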
11400
11401 static bool tg3_enable_msix(struct tg3 *tp)
11402 {
11403         int i, rc;
11404         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11405
11406         tp->txq_cnt = tp->txq_req;
11407         tp->rxq_cnt = tp->rxq_req;
11408         if (!tp->rxq_cnt)
11409                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11410         if (tp->rxq_cnt > tp->rxq_max)
11411                 tp->rxq_cnt = tp->rxq_max;
11412
11413         /* Disable multiple TX rings by default.  Simple round-robin hardware
11414          * scheduling of the TX rings can cause starvation of rings with
11415          * small packets when other rings have TSO or jumbo packets.
11416          */
11417         if (!tp->txq_req)
11418                 tp->txq_cnt = 1;
11419
11420         tp->irq_cnt = tg3_irq_count(tp);
11421
11422         for (i = 0; i < tp->irq_max; i++) {
11423                 msix_ent[i].entry  = i;
11424                 msix_ent[i].vector = 0;
11425         }
11426
11427         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11428         if (rc < 0) {
11429                 return false;
11430         } else if (rc < tp->irq_cnt) {
11431                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11432                               tp->irq_cnt, rc);
11433                 tp->irq_cnt = rc;
11434                 tp->rxq_cnt = max(rc - 1, 1);
11435                 if (tp->txq_cnt)
11436                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11437         }
11438
11439         for (i = 0; i < tp->irq_max; i++)
11440                 tp->napi[i].irq_vec = msix_ent[i].vector;
11441
11442         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11443                 pci_disable_msix(tp->pdev);
11444                 return false;
11445         }
11446
11447         if (tp->irq_cnt == 1)
11448                 return true;
11449
11450         tg3_flag_set(tp, ENABLE_RSS);
11451
11452         if (tp->txq_cnt > 1)
11453                 tg3_flag_set(tp, ENABLE_TSS);
11454
11455         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11456
11457         return true;
11458 }
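/* Illustrative note (not in the original source): if five vectors are
 * requested above but pci_enable_msix_range() grants only three, the
 * code settles on irq_cnt = 3 and rxq_cnt = max(3 - 1, 1) = 2 RX rings
 * behind vectors 1 and 2, and a non-zero txq_cnt is clamped to
 * min(rxq_cnt, txq_max).
 */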
11459
11460 static void tg3_ints_init(struct tg3 *tp)
11461 {
11462         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11463             !tg3_flag(tp, TAGGED_STATUS)) {
11464                 /* All MSI-supporting chips should support tagged
11465                  * status; warn and fall back if that is not the case.
11466                  */
11467                 netdev_warn(tp->dev,
11468                             "MSI without TAGGED_STATUS? Not using MSI\n");
11469                 goto defcfg;
11470         }
11471
11472         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11473                 tg3_flag_set(tp, USING_MSIX);
11474         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11475                 tg3_flag_set(tp, USING_MSI);
11476
11477         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11478                 u32 msi_mode = tr32(MSGINT_MODE);
11479                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11480                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11481                 if (!tg3_flag(tp, 1SHOT_MSI))
11482                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11483                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11484         }
11485 defcfg:
11486         if (!tg3_flag(tp, USING_MSIX)) {
11487                 tp->irq_cnt = 1;
11488                 tp->napi[0].irq_vec = tp->pdev->irq;
11489         }
11490
11491         if (tp->irq_cnt == 1) {
11492                 tp->txq_cnt = 1;
11493                 tp->rxq_cnt = 1;
11494                 netif_set_real_num_tx_queues(tp->dev, 1);
11495                 netif_set_real_num_rx_queues(tp->dev, 1);
11496         }
11497 }
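/* Illustrative note (not in the original source): the resulting policy is
 * a simple ladder: MSI-X when supported and successfully enabled,
 * otherwise MSI, otherwise legacy INTx on tp->pdev->irq; and any
 * single-vector outcome collapses the device back to one TX and one RX
 * queue.
 */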
11498
11499 static void tg3_ints_fini(struct tg3 *tp)
11500 {
11501         if (tg3_flag(tp, USING_MSIX))
11502                 pci_disable_msix(tp->pdev);
11503         else if (tg3_flag(tp, USING_MSI))
11504                 pci_disable_msi(tp->pdev);
11505         tg3_flag_clear(tp, USING_MSI);
11506         tg3_flag_clear(tp, USING_MSIX);
11507         tg3_flag_clear(tp, ENABLE_RSS);
11508         tg3_flag_clear(tp, ENABLE_TSS);
11509 }
11510
11511 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11512                      bool init)
11513 {
11514         struct net_device *dev = tp->dev;
11515         int i, err;
11516
11517         /*
11518          * Setup interrupts first so we know how
11519          * many NAPI resources to allocate
11520          */
11521         tg3_ints_init(tp);
11522
11523         tg3_rss_check_indir_tbl(tp);
11524
11525         /* The placement of this call is tied
11526          * to the setup and use of Host TX descriptors.
11527          */
11528         err = tg3_alloc_consistent(tp);
11529         if (err)
11530                 goto out_ints_fini;
11531
11532         tg3_napi_init(tp);
11533
11534         tg3_napi_enable(tp);
11535
11536         for (i = 0; i < tp->irq_cnt; i++) {
11537                 err = tg3_request_irq(tp, i);
11538                 if (err) {
11539                         for (i--; i >= 0; i--) {
11540                                 struct tg3_napi *tnapi = &tp->napi[i];
11541
11542                                 free_irq(tnapi->irq_vec, tnapi);
11543                         }
11544                         goto out_napi_fini;
11545                 }
11546         }
11547
11548         tg3_full_lock(tp, 0);
11549
11550         if (init)
11551                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11552
11553         err = tg3_init_hw(tp, reset_phy);
11554         if (err) {
11555                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11556                 tg3_free_rings(tp);
11557         }
11558
11559         tg3_full_unlock(tp);
11560
11561         if (err)
11562                 goto out_free_irq;
11563
11564         if (test_irq && tg3_flag(tp, USING_MSI)) {
11565                 err = tg3_test_msi(tp);
11566
11567                 if (err) {
11568                         tg3_full_lock(tp, 0);
11569                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11570                         tg3_free_rings(tp);
11571                         tg3_full_unlock(tp);
11572
11573                         goto out_napi_fini;
11574                 }
11575
11576                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11577                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11578
11579                         tw32(PCIE_TRANSACTION_CFG,
11580                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11581                 }
11582         }
11583
11584         tg3_phy_start(tp);
11585
11586         tg3_hwmon_open(tp);
11587
11588         tg3_full_lock(tp, 0);
11589
11590         tg3_timer_start(tp);
11591         tg3_flag_set(tp, INIT_COMPLETE);
11592         tg3_enable_ints(tp);
11593
11594         tg3_ptp_resume(tp);
11595
11596         tg3_full_unlock(tp);
11597
11598         netif_tx_start_all_queues(dev);
11599
11600         /*
11601          * Reset the loopback feature if it was turned on while the device
11602          * was down; make sure that it is installed properly now.
11603          */
11604         if (dev->features & NETIF_F_LOOPBACK)
11605                 tg3_set_loopback(dev, dev->features);
11606
11607         return 0;
11608
11609 out_free_irq:
11610         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11611                 struct tg3_napi *tnapi = &tp->napi[i];
11612                 free_irq(tnapi->irq_vec, tnapi);
11613         }
11614
11615 out_napi_fini:
11616         tg3_napi_disable(tp);
11617         tg3_napi_fini(tp);
11618         tg3_free_consistent(tp);
11619
11620 out_ints_fini:
11621         tg3_ints_fini(tp);
11622
11623         return err;
11624 }
11625
11626 static void tg3_stop(struct tg3 *tp)
11627 {
11628         int i;
11629
11630         tg3_reset_task_cancel(tp);
11631         tg3_netif_stop(tp);
11632
11633         tg3_timer_stop(tp);
11634
11635         tg3_hwmon_close(tp);
11636
11637         tg3_phy_stop(tp);
11638
11639         tg3_full_lock(tp, 1);
11640
11641         tg3_disable_ints(tp);
11642
11643         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11644         tg3_free_rings(tp);
11645         tg3_flag_clear(tp, INIT_COMPLETE);
11646
11647         tg3_full_unlock(tp);
11648
11649         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11650                 struct tg3_napi *tnapi = &tp->napi[i];
11651                 free_irq(tnapi->irq_vec, tnapi);
11652         }
11653
11654         tg3_ints_fini(tp);
11655
11656         tg3_napi_fini(tp);
11657
11658         tg3_free_consistent(tp);
11659 }
11660
11661 static int tg3_open(struct net_device *dev)
11662 {
11663         struct tg3 *tp = netdev_priv(dev);
11664         int err;
11665
11666         if (tp->pcierr_recovery) {
11667                 netdev_err(dev, "Failed to open device. PCI error recovery "
11668                            "in progress\n");
11669                 return -EAGAIN;
11670         }
11671
11672         if (tp->fw_needed) {
11673                 err = tg3_request_firmware(tp);
11674                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11675                         if (err) {
11676                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11677                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11678                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11679                                 netdev_warn(tp->dev, "EEE capability restored\n");
11680                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11681                         }
11682                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11683                         if (err)
11684                                 return err;
11685                 } else if (err) {
11686                         netdev_warn(tp->dev, "TSO capability disabled\n");
11687                         tg3_flag_clear(tp, TSO_CAPABLE);
11688                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11689                         netdev_notice(tp->dev, "TSO capability restored\n");
11690                         tg3_flag_set(tp, TSO_CAPABLE);
11691                 }
11692         }
11693
11694         tg3_carrier_off(tp);
11695
11696         err = tg3_power_up(tp);
11697         if (err)
11698                 return err;
11699
11700         tg3_full_lock(tp, 0);
11701
11702         tg3_disable_ints(tp);
11703         tg3_flag_clear(tp, INIT_COMPLETE);
11704
11705         tg3_full_unlock(tp);
11706
11707         err = tg3_start(tp,
11708                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11709                         true, true);
11710         if (err) {
11711                 tg3_frob_aux_power(tp, false);
11712                 pci_set_power_state(tp->pdev, PCI_D3hot);
11713         }
11714
11715         return err;
11716 }
11717
11718 static int tg3_close(struct net_device *dev)
11719 {
11720         struct tg3 *tp = netdev_priv(dev);
11721
11722         if (tp->pcierr_recovery) {
11723                 netdev_err(dev, "Failed to close device. PCI error recovery "
11724                            "in progress\n");
11725                 return -EAGAIN;
11726         }
11727
11728         tg3_stop(tp);
11729
11730         if (pci_device_is_present(tp->pdev)) {
11731                 tg3_power_down_prepare(tp);
11732
11733                 tg3_carrier_off(tp);
11734         }
11735         return 0;
11736 }
11737
11738 static inline u64 get_stat64(tg3_stat64_t *val)
11739 {
11740         return ((u64)val->high << 32) | ((u64)val->low);
11741 }
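/* Illustrative note (not in the original source): the hardware exports
 * each counter as two 32-bit halves; for val->high = 0x1 and
 * val->low = 0x2 the helper above returns 0x100000002.
 */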
11742
11743 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11744 {
11745         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11746
11747         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11748             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11749              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11750                 u32 val;
11751
11752                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11753                         tg3_writephy(tp, MII_TG3_TEST1,
11754                                      val | MII_TG3_TEST1_CRC_EN);
11755                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11756                 } else
11757                         val = 0;
11758
11759                 tp->phy_crc_errors += val;
11760
11761                 return tp->phy_crc_errors;
11762         }
11763
11764         return get_stat64(&hw_stats->rx_fcs_errors);
11765 }
11766
11767 #define ESTAT_ADD(member) \
11768         estats->member =        old_estats->member + \
11769                                 get_stat64(&hw_stats->member)
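/* Illustrative note (not in the original source): ESTAT_ADD(rx_octets)
 * expands to
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each reported value is the accumulator saved across the last
 * reset plus the live hardware counter.
 */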
11770
11771 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11772 {
11773         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11774         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11775
11776         ESTAT_ADD(rx_octets);
11777         ESTAT_ADD(rx_fragments);
11778         ESTAT_ADD(rx_ucast_packets);
11779         ESTAT_ADD(rx_mcast_packets);
11780         ESTAT_ADD(rx_bcast_packets);
11781         ESTAT_ADD(rx_fcs_errors);
11782         ESTAT_ADD(rx_align_errors);
11783         ESTAT_ADD(rx_xon_pause_rcvd);
11784         ESTAT_ADD(rx_xoff_pause_rcvd);
11785         ESTAT_ADD(rx_mac_ctrl_rcvd);
11786         ESTAT_ADD(rx_xoff_entered);
11787         ESTAT_ADD(rx_frame_too_long_errors);
11788         ESTAT_ADD(rx_jabbers);
11789         ESTAT_ADD(rx_undersize_packets);
11790         ESTAT_ADD(rx_in_length_errors);
11791         ESTAT_ADD(rx_out_length_errors);
11792         ESTAT_ADD(rx_64_or_less_octet_packets);
11793         ESTAT_ADD(rx_65_to_127_octet_packets);
11794         ESTAT_ADD(rx_128_to_255_octet_packets);
11795         ESTAT_ADD(rx_256_to_511_octet_packets);
11796         ESTAT_ADD(rx_512_to_1023_octet_packets);
11797         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11798         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11799         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11800         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11801         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11802
11803         ESTAT_ADD(tx_octets);
11804         ESTAT_ADD(tx_collisions);
11805         ESTAT_ADD(tx_xon_sent);
11806         ESTAT_ADD(tx_xoff_sent);
11807         ESTAT_ADD(tx_flow_control);
11808         ESTAT_ADD(tx_mac_errors);
11809         ESTAT_ADD(tx_single_collisions);
11810         ESTAT_ADD(tx_mult_collisions);
11811         ESTAT_ADD(tx_deferred);
11812         ESTAT_ADD(tx_excessive_collisions);
11813         ESTAT_ADD(tx_late_collisions);
11814         ESTAT_ADD(tx_collide_2times);
11815         ESTAT_ADD(tx_collide_3times);
11816         ESTAT_ADD(tx_collide_4times);
11817         ESTAT_ADD(tx_collide_5times);
11818         ESTAT_ADD(tx_collide_6times);
11819         ESTAT_ADD(tx_collide_7times);
11820         ESTAT_ADD(tx_collide_8times);
11821         ESTAT_ADD(tx_collide_9times);
11822         ESTAT_ADD(tx_collide_10times);
11823         ESTAT_ADD(tx_collide_11times);
11824         ESTAT_ADD(tx_collide_12times);
11825         ESTAT_ADD(tx_collide_13times);
11826         ESTAT_ADD(tx_collide_14times);
11827         ESTAT_ADD(tx_collide_15times);
11828         ESTAT_ADD(tx_ucast_packets);
11829         ESTAT_ADD(tx_mcast_packets);
11830         ESTAT_ADD(tx_bcast_packets);
11831         ESTAT_ADD(tx_carrier_sense_errors);
11832         ESTAT_ADD(tx_discards);
11833         ESTAT_ADD(tx_errors);
11834
11835         ESTAT_ADD(dma_writeq_full);
11836         ESTAT_ADD(dma_write_prioq_full);
11837         ESTAT_ADD(rxbds_empty);
11838         ESTAT_ADD(rx_discards);
11839         ESTAT_ADD(rx_errors);
11840         ESTAT_ADD(rx_threshold_hit);
11841
11842         ESTAT_ADD(dma_readq_full);
11843         ESTAT_ADD(dma_read_prioq_full);
11844         ESTAT_ADD(tx_comp_queue_full);
11845
11846         ESTAT_ADD(ring_set_send_prod_index);
11847         ESTAT_ADD(ring_status_update);
11848         ESTAT_ADD(nic_irqs);
11849         ESTAT_ADD(nic_avoided_irqs);
11850         ESTAT_ADD(nic_tx_threshold_hit);
11851
11852         ESTAT_ADD(mbuf_lwm_thresh_hit);
11853 }
11854
11855 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11856 {
11857         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11858         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11859
11860         stats->rx_packets = old_stats->rx_packets +
11861                 get_stat64(&hw_stats->rx_ucast_packets) +
11862                 get_stat64(&hw_stats->rx_mcast_packets) +
11863                 get_stat64(&hw_stats->rx_bcast_packets);
11864
11865         stats->tx_packets = old_stats->tx_packets +
11866                 get_stat64(&hw_stats->tx_ucast_packets) +
11867                 get_stat64(&hw_stats->tx_mcast_packets) +
11868                 get_stat64(&hw_stats->tx_bcast_packets);
11869
11870         stats->rx_bytes = old_stats->rx_bytes +
11871                 get_stat64(&hw_stats->rx_octets);
11872         stats->tx_bytes = old_stats->tx_bytes +
11873                 get_stat64(&hw_stats->tx_octets);
11874
11875         stats->rx_errors = old_stats->rx_errors +
11876                 get_stat64(&hw_stats->rx_errors);
11877         stats->tx_errors = old_stats->tx_errors +
11878                 get_stat64(&hw_stats->tx_errors) +
11879                 get_stat64(&hw_stats->tx_mac_errors) +
11880                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11881                 get_stat64(&hw_stats->tx_discards);
11882
11883         stats->multicast = old_stats->multicast +
11884                 get_stat64(&hw_stats->rx_mcast_packets);
11885         stats->collisions = old_stats->collisions +
11886                 get_stat64(&hw_stats->tx_collisions);
11887
11888         stats->rx_length_errors = old_stats->rx_length_errors +
11889                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11890                 get_stat64(&hw_stats->rx_undersize_packets);
11891
11892         stats->rx_frame_errors = old_stats->rx_frame_errors +
11893                 get_stat64(&hw_stats->rx_align_errors);
11894         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11895                 get_stat64(&hw_stats->tx_discards);
11896         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11897                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11898
11899         stats->rx_crc_errors = old_stats->rx_crc_errors +
11900                 tg3_calc_crc_errors(tp);
11901
11902         stats->rx_missed_errors = old_stats->rx_missed_errors +
11903                 get_stat64(&hw_stats->rx_discards);
11904
11905         stats->rx_dropped = tp->rx_dropped;
11906         stats->tx_dropped = tp->tx_dropped;
11907 }
11908
11909 static int tg3_get_regs_len(struct net_device *dev)
11910 {
11911         return TG3_REG_BLK_SIZE;
11912 }
11913
11914 static void tg3_get_regs(struct net_device *dev,
11915                 struct ethtool_regs *regs, void *_p)
11916 {
11917         struct tg3 *tp = netdev_priv(dev);
11918
11919         regs->version = 0;
11920
11921         memset(_p, 0, TG3_REG_BLK_SIZE);
11922
11923         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11924                 return;
11925
11926         tg3_full_lock(tp, 0);
11927
11928         tg3_dump_legacy_regs(tp, (u32 *)_p);
11929
11930         tg3_full_unlock(tp);
11931 }
11932
11933 static int tg3_get_eeprom_len(struct net_device *dev)
11934 {
11935         struct tg3 *tp = netdev_priv(dev);
11936
11937         return tp->nvram_size;
11938 }
11939
11940 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11941 {
11942         struct tg3 *tp = netdev_priv(dev);
11943         int ret, cpmu_restore = 0;
11944         u8  *pd;
11945         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11946         __be32 val;
11947
11948         if (tg3_flag(tp, NO_NVRAM))
11949                 return -EINVAL;
11950
11951         offset = eeprom->offset;
11952         len = eeprom->len;
11953         eeprom->len = 0;
11954
11955         eeprom->magic = TG3_EEPROM_MAGIC;
11956
11957         /* Override clock, link aware and link idle modes */
11958         if (tg3_flag(tp, CPMU_PRESENT)) {
11959                 cpmu_val = tr32(TG3_CPMU_CTRL);
11960                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11961                                 CPMU_CTRL_LINK_IDLE_MODE)) {
11962                         tw32(TG3_CPMU_CTRL, cpmu_val &
11963                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
11964                                              CPMU_CTRL_LINK_IDLE_MODE));
11965                         cpmu_restore = 1;
11966                 }
11967         }
11968         tg3_override_clk(tp);
11969
11970         if (offset & 3) {
11971                 /* adjustments to start on required 4 byte boundary */
11972                 b_offset = offset & 3;
11973                 b_count = 4 - b_offset;
11974                 if (b_count > len) {
11975                         /* i.e. offset=1 len=2 */
11976                         b_count = len;
11977                 }
11978                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11979                 if (ret)
11980                         goto eeprom_done;
11981                 memcpy(data, ((char *)&val) + b_offset, b_count);
11982                 len -= b_count;
11983                 offset += b_count;
11984                 eeprom->len += b_count;
11985         }
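        /* Illustrative note (not in the original source): for offset = 5
         * and len = 2, b_offset = 1 and b_count = min(4 - 1, len) = 2;
         * the word at offset 4 is read and its bytes 1..2 copied out,
         * leaving len = 0 with offset advanced to 7.
         */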
11986
11987         /* read bytes up to the last 4 byte boundary */
11988         pd = &data[eeprom->len];
11989         for (i = 0; i < (len - (len & 3)); i += 4) {
11990                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11991                 if (ret) {
11992                         if (i)
11993                                 i -= 4;
11994                         eeprom->len += i;
11995                         goto eeprom_done;
11996                 }
11997                 memcpy(pd + i, &val, 4);
11998                 if (need_resched()) {
11999                         if (signal_pending(current)) {
12000                                 eeprom->len += i;
12001                                 ret = -EINTR;
12002                                 goto eeprom_done;
12003                         }
12004                         cond_resched();
12005                 }
12006         }
12007         eeprom->len += i;
12008
12009         if (len & 3) {
12010                 /* read last bytes not ending on 4 byte boundary */
12011                 pd = &data[eeprom->len];
12012                 b_count = len & 3;
12013                 b_offset = offset + len - b_count;
12014                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12015                 if (ret)
12016                         goto eeprom_done;
12017                 memcpy(pd, &val, b_count);
12018                 eeprom->len += b_count;
12019         }
12020         ret = 0;
12021
12022 eeprom_done:
12023         /* Restore clock, link aware and link idle modes */
12024         tg3_restore_clk(tp);
12025         if (cpmu_restore)
12026                 tw32(TG3_CPMU_CTRL, cpmu_val);
12027
12028         return ret;
12029 }
12030
12031 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12032 {
12033         struct tg3 *tp = netdev_priv(dev);
12034         int ret;
12035         u32 offset, len, b_offset, odd_len;
12036         u8 *buf;
12037         __be32 start = 0, end;
12038
12039         if (tg3_flag(tp, NO_NVRAM) ||
12040             eeprom->magic != TG3_EEPROM_MAGIC)
12041                 return -EINVAL;
12042
12043         offset = eeprom->offset;
12044         len = eeprom->len;
12045
12046         if ((b_offset = (offset & 3))) {
12047                 /* adjustments to start on required 4 byte boundary */
12048                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12049                 if (ret)
12050                         return ret;
12051                 len += b_offset;
12052                 offset &= ~3;
12053                 if (len < 4)
12054                         len = 4;
12055         }
12056
12057         odd_len = 0;
12058         if (len & 3) {
12059                 /* adjustments to end on required 4 byte boundary */
12060                 odd_len = 1;
12061                 len = (len + 3) & ~3;
12062                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12063                 if (ret)
12064                         return ret;
12065         }
12066
12067         buf = data;
12068         if (b_offset || odd_len) {
12069                 buf = kmalloc(len, GFP_KERNEL);
12070                 if (!buf)
12071                         return -ENOMEM;
12072                 if (b_offset)
12073                         memcpy(buf, &start, 4);
12074                 if (odd_len)
12075                         memcpy(buf+len-4, &end, 4);
12076                 memcpy(buf + b_offset, data, eeprom->len);
12077         }
12078
12079         ret = tg3_nvram_write_block(tp, offset, len, buf);
12080
12081         if (buf != data)
12082                 kfree(buf);
12083
12084         return ret;
12085 }
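/* Illustrative note (not in the original source): for an unaligned write
 * with offset = 2 and len = 5, the code above reads the word at offset 0
 * into "start" and, after padding len up to 8, the word at offset 4 into
 * "end"; the 8-byte bounce buffer then carries bytes 0-1 from "start",
 * the 5 user bytes at 2-6 and byte 7 from "end", and the whole aligned
 * block is written back at offset 0.
 */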
12086
12087 static int tg3_get_link_ksettings(struct net_device *dev,
12088                                   struct ethtool_link_ksettings *cmd)
12089 {
12090         struct tg3 *tp = netdev_priv(dev);
12091         u32 supported, advertising;
12092
12093         if (tg3_flag(tp, USE_PHYLIB)) {
12094                 struct phy_device *phydev;
12095                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12096                         return -EAGAIN;
12097                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12098                 phy_ethtool_ksettings_get(phydev, cmd);
12099
12100                 return 0;
12101         }
12102
12103         supported = (SUPPORTED_Autoneg);
12104
12105         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12106                 supported |= (SUPPORTED_1000baseT_Half |
12107                               SUPPORTED_1000baseT_Full);
12108
12109         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12110                 supported |= (SUPPORTED_100baseT_Half |
12111                               SUPPORTED_100baseT_Full |
12112                               SUPPORTED_10baseT_Half |
12113                               SUPPORTED_10baseT_Full |
12114                               SUPPORTED_TP);
12115                 cmd->base.port = PORT_TP;
12116         } else {
12117                 supported |= SUPPORTED_FIBRE;
12118                 cmd->base.port = PORT_FIBRE;
12119         }
12120         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12121                                                 supported);
12122
12123         advertising = tp->link_config.advertising;
12124         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12125                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12126                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12127                                 advertising |= ADVERTISED_Pause;
12128                         } else {
12129                                 advertising |= ADVERTISED_Pause |
12130                                         ADVERTISED_Asym_Pause;
12131                         }
12132                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12133                         advertising |= ADVERTISED_Asym_Pause;
12134                 }
12135         }
12136         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12137                                                 advertising);
12138
12139         if (netif_running(dev) && tp->link_up) {
12140                 cmd->base.speed = tp->link_config.active_speed;
12141                 cmd->base.duplex = tp->link_config.active_duplex;
12142                 ethtool_convert_legacy_u32_to_link_mode(
12143                         cmd->link_modes.lp_advertising,
12144                         tp->link_config.rmt_adv);
12145
12146                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12147                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12148                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12149                         else
12150                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12151                 }
12152         } else {
12153                 cmd->base.speed = SPEED_UNKNOWN;
12154                 cmd->base.duplex = DUPLEX_UNKNOWN;
12155                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12156         }
12157         cmd->base.phy_address = tp->phy_addr;
12158         cmd->base.autoneg = tp->link_config.autoneg;
12159         return 0;
12160 }
12161
12162 static int tg3_set_link_ksettings(struct net_device *dev,
12163                                   const struct ethtool_link_ksettings *cmd)
12164 {
12165         struct tg3 *tp = netdev_priv(dev);
12166         u32 speed = cmd->base.speed;
12167         u32 advertising;
12168
12169         if (tg3_flag(tp, USE_PHYLIB)) {
12170                 struct phy_device *phydev;
12171                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12172                         return -EAGAIN;
12173                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12174                 return phy_ethtool_ksettings_set(phydev, cmd);
12175         }
12176
12177         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12178             cmd->base.autoneg != AUTONEG_DISABLE)
12179                 return -EINVAL;
12180
12181         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12182             cmd->base.duplex != DUPLEX_FULL &&
12183             cmd->base.duplex != DUPLEX_HALF)
12184                 return -EINVAL;
12185
12186         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12187                                                 cmd->link_modes.advertising);
12188
12189         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12190                 u32 mask = ADVERTISED_Autoneg |
12191                            ADVERTISED_Pause |
12192                            ADVERTISED_Asym_Pause;
12193
12194                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12195                         mask |= ADVERTISED_1000baseT_Half |
12196                                 ADVERTISED_1000baseT_Full;
12197
12198                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12199                         mask |= ADVERTISED_100baseT_Half |
12200                                 ADVERTISED_100baseT_Full |
12201                                 ADVERTISED_10baseT_Half |
12202                                 ADVERTISED_10baseT_Full |
12203                                 ADVERTISED_TP;
12204                 else
12205                         mask |= ADVERTISED_FIBRE;
12206
12207                 if (advertising & ~mask)
12208                         return -EINVAL;
12209
12210                 mask &= (ADVERTISED_1000baseT_Half |
12211                          ADVERTISED_1000baseT_Full |
12212                          ADVERTISED_100baseT_Half |
12213                          ADVERTISED_100baseT_Full |
12214                          ADVERTISED_10baseT_Half |
12215                          ADVERTISED_10baseT_Full);
12216
12217                 advertising &= mask;
12218         } else {
12219                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12220                         if (speed != SPEED_1000)
12221                                 return -EINVAL;
12222
12223                         if (cmd->base.duplex != DUPLEX_FULL)
12224                                 return -EINVAL;
12225                 } else {
12226                         if (speed != SPEED_100 &&
12227                             speed != SPEED_10)
12228                                 return -EINVAL;
12229                 }
12230         }
12231
12232         tg3_full_lock(tp, 0);
12233
12234         tp->link_config.autoneg = cmd->base.autoneg;
12235         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12236                 tp->link_config.advertising = (advertising |
12237                                               ADVERTISED_Autoneg);
12238                 tp->link_config.speed = SPEED_UNKNOWN;
12239                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12240         } else {
12241                 tp->link_config.advertising = 0;
12242                 tp->link_config.speed = speed;
12243                 tp->link_config.duplex = cmd->base.duplex;
12244         }
12245
12246         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12247
12248         tg3_warn_mgmt_link_flap(tp);
12249
12250         if (netif_running(dev))
12251                 tg3_setup_phy(tp, true);
12252
12253         tg3_full_unlock(tp);
12254
12255         return 0;
12256 }
12257
12258 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12259 {
12260         struct tg3 *tp = netdev_priv(dev);
12261
12262         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12263         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12264         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12265         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12266 }
12267
12268 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12269 {
12270         struct tg3 *tp = netdev_priv(dev);
12271
12272         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12273                 wol->supported = WAKE_MAGIC;
12274         else
12275                 wol->supported = 0;
12276         wol->wolopts = 0;
12277         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12278                 wol->wolopts = WAKE_MAGIC;
12279         memset(&wol->sopass, 0, sizeof(wol->sopass));
12280 }
12281
12282 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12283 {
12284         struct tg3 *tp = netdev_priv(dev);
12285         struct device *dp = &tp->pdev->dev;
12286
12287         if (wol->wolopts & ~WAKE_MAGIC)
12288                 return -EINVAL;
12289         if ((wol->wolopts & WAKE_MAGIC) &&
12290             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12291                 return -EINVAL;
12292
12293         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12294
12295         if (device_may_wakeup(dp))
12296                 tg3_flag_set(tp, WOL_ENABLE);
12297         else
12298                 tg3_flag_clear(tp, WOL_ENABLE);
12299
12300         return 0;
12301 }
12302
12303 static u32 tg3_get_msglevel(struct net_device *dev)
12304 {
12305         struct tg3 *tp = netdev_priv(dev);
12306         return tp->msg_enable;
12307 }
12308
12309 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12310 {
12311         struct tg3 *tp = netdev_priv(dev);
12312         tp->msg_enable = value;
12313 }
12314
12315 static int tg3_nway_reset(struct net_device *dev)
12316 {
12317         struct tg3 *tp = netdev_priv(dev);
12318         int r;
12319
12320         if (!netif_running(dev))
12321                 return -EAGAIN;
12322
12323         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12324                 return -EINVAL;
12325
12326         tg3_warn_mgmt_link_flap(tp);
12327
12328         if (tg3_flag(tp, USE_PHYLIB)) {
12329                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12330                         return -EAGAIN;
12331                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12332         } else {
12333                 u32 bmcr;
12334
12335                 spin_lock_bh(&tp->lock);
12336                 r = -EINVAL;
12337                 tg3_readphy(tp, MII_BMCR, &bmcr);
12338                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12339                     ((bmcr & BMCR_ANENABLE) ||
12340                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12341                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12342                                                    BMCR_ANENABLE);
12343                         r = 0;
12344                 }
12345                 spin_unlock_bh(&tp->lock);
12346         }
12347
12348         return r;
12349 }
12350
12351 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12352 {
12353         struct tg3 *tp = netdev_priv(dev);
12354
12355         ering->rx_max_pending = tp->rx_std_ring_mask;
12356         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12357                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12358         else
12359                 ering->rx_jumbo_max_pending = 0;
12360
12361         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12362
12363         ering->rx_pending = tp->rx_pending;
12364         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12365                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12366         else
12367                 ering->rx_jumbo_pending = 0;
12368
12369         ering->tx_pending = tp->napi[0].tx_pending;
12370 }
12371
12372 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12373 {
12374         struct tg3 *tp = netdev_priv(dev);
12375         int i, irq_sync = 0, err = 0;
12376
12377         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12378             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12379             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12380             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12381             (tg3_flag(tp, TSO_BUG) &&
12382              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12383                 return -EINVAL;
12384
12385         if (netif_running(dev)) {
12386                 tg3_phy_stop(tp);
12387                 tg3_netif_stop(tp);
12388                 irq_sync = 1;
12389         }
12390
12391         tg3_full_lock(tp, irq_sync);
12392
12393         tp->rx_pending = ering->rx_pending;
12394
12395         if (tg3_flag(tp, MAX_RXPEND_64) &&
12396             tp->rx_pending > 63)
12397                 tp->rx_pending = 63;
12398
12399         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12400                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12401
12402         for (i = 0; i < tp->irq_max; i++)
12403                 tp->napi[i].tx_pending = ering->tx_pending;
12404
12405         if (netif_running(dev)) {
12406                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12407                 err = tg3_restart_hw(tp, false);
12408                 if (!err)
12409                         tg3_netif_start(tp);
12410         }
12411
12412         tg3_full_unlock(tp);
12413
12414         if (irq_sync && !err)
12415                 tg3_phy_start(tp);
12416
12417         return err;
12418 }
12419
12420 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12421 {
12422         struct tg3 *tp = netdev_priv(dev);
12423
12424         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12425
12426         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12427                 epause->rx_pause = 1;
12428         else
12429                 epause->rx_pause = 0;
12430
12431         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12432                 epause->tx_pause = 1;
12433         else
12434                 epause->tx_pause = 0;
12435 }
12436
12437 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12438 {
12439         struct tg3 *tp = netdev_priv(dev);
12440         int err = 0;
12441
12442         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12443                 tg3_warn_mgmt_link_flap(tp);
12444
12445         if (tg3_flag(tp, USE_PHYLIB)) {
12446                 u32 newadv;
12447                 struct phy_device *phydev;
12448
12449                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12450
12451                 if (!(phydev->supported & SUPPORTED_Pause) ||
12452                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12453                      (epause->rx_pause != epause->tx_pause)))
12454                         return -EINVAL;
12455
12456                 tp->link_config.flowctrl = 0;
12457                 if (epause->rx_pause) {
12458                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12459
12460                         if (epause->tx_pause) {
12461                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12462                                 newadv = ADVERTISED_Pause;
12463                         } else
12464                                 newadv = ADVERTISED_Pause |
12465                                          ADVERTISED_Asym_Pause;
12466                 } else if (epause->tx_pause) {
12467                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12468                         newadv = ADVERTISED_Asym_Pause;
12469                 } else
12470                         newadv = 0;
12471
12472                 if (epause->autoneg)
12473                         tg3_flag_set(tp, PAUSE_AUTONEG);
12474                 else
12475                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12476
12477                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12478                         u32 oldadv = phydev->advertising &
12479                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12480                         if (oldadv != newadv) {
12481                                 phydev->advertising &=
12482                                         ~(ADVERTISED_Pause |
12483                                           ADVERTISED_Asym_Pause);
12484                                 phydev->advertising |= newadv;
12485                                 if (phydev->autoneg) {
12486                                         /*
12487                                          * Always renegotiate the link to
12488                                          * inform our link partner of our
12489                                          * flow control settings, even if the
12490                                          * flow control is forced.  Let
12491                                          * tg3_adjust_link() do the final
12492                                          * flow control setup.
12493                                          */
12494                                         return phy_start_aneg(phydev);
12495                                 }
12496                         }
12497
12498                         if (!epause->autoneg)
12499                                 tg3_setup_flow_control(tp, 0, 0);
12500                 } else {
12501                         tp->link_config.advertising &=
12502                                         ~(ADVERTISED_Pause |
12503                                           ADVERTISED_Asym_Pause);
12504                         tp->link_config.advertising |= newadv;
12505                 }
12506         } else {
12507                 int irq_sync = 0;
12508
12509                 if (netif_running(dev)) {
12510                         tg3_netif_stop(tp);
12511                         irq_sync = 1;
12512                 }
12513
12514                 tg3_full_lock(tp, irq_sync);
12515
12516                 if (epause->autoneg)
12517                         tg3_flag_set(tp, PAUSE_AUTONEG);
12518                 else
12519                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12520                 if (epause->rx_pause)
12521                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12522                 else
12523                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12524                 if (epause->tx_pause)
12525                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12526                 else
12527                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12528
12529                 if (netif_running(dev)) {
12530                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12531                         err = tg3_restart_hw(tp, false);
12532                         if (!err)
12533                                 tg3_netif_start(tp);
12534                 }
12535
12536                 tg3_full_unlock(tp);
12537         }
12538
12539         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12540
12541         return err;
12542 }
12543
12544 static int tg3_get_sset_count(struct net_device *dev, int sset)
12545 {
12546         switch (sset) {
12547         case ETH_SS_TEST:
12548                 return TG3_NUM_TEST;
12549         case ETH_SS_STATS:
12550                 return TG3_NUM_STATS;
12551         default:
12552                 return -EOPNOTSUPP;
12553         }
12554 }
12555
12556 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12557                          u32 *rules __always_unused)
12558 {
12559         struct tg3 *tp = netdev_priv(dev);
12560
12561         if (!tg3_flag(tp, SUPPORT_MSIX))
12562                 return -EOPNOTSUPP;
12563
12564         switch (info->cmd) {
12565         case ETHTOOL_GRXRINGS:
12566                 if (netif_running(tp->dev))
12567                         info->data = tp->rxq_cnt;
12568                 else {
12569                         info->data = num_online_cpus();
12570                         if (info->data > TG3_RSS_MAX_NUM_QS)
12571                                 info->data = TG3_RSS_MAX_NUM_QS;
12572                 }
12573
12574                 return 0;
12575
12576         default:
12577                 return -EOPNOTSUPP;
12578         }
12579 }
12580
12581 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12582 {
12583         u32 size = 0;
12584         struct tg3 *tp = netdev_priv(dev);
12585
12586         if (tg3_flag(tp, SUPPORT_MSIX))
12587                 size = TG3_RSS_INDIR_TBL_SIZE;
12588
12589         return size;
12590 }
12591
12592 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12593 {
12594         struct tg3 *tp = netdev_priv(dev);
12595         int i;
12596
12597         if (hfunc)
12598                 *hfunc = ETH_RSS_HASH_TOP;
12599         if (!indir)
12600                 return 0;
12601
12602         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12603                 indir[i] = tp->rss_ind_tbl[i];
12604
12605         return 0;
12606 }
12607
12608 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12609                         const u8 hfunc)
12610 {
12611         struct tg3 *tp = netdev_priv(dev);
12612         size_t i;
12613
12614         /* We require at least one supported parameter to be changed and no
12615          * change in any of the unsupported parameters.
12616          */
12617         if (key ||
12618             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12619                 return -EOPNOTSUPP;
12620
12621         if (!indir)
12622                 return 0;
12623
12624         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12625                 tp->rss_ind_tbl[i] = indir[i];
12626
12627         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12628                 return 0;
12629
12630         /* It is legal to write the indirection
12631          * table while the device is running.
12632          */
12633         tg3_full_lock(tp, 0);
12634         tg3_rss_write_indir_tbl(tp);
12635         tg3_full_unlock(tp);
12636
12637         return 0;
12638 }
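/* Illustrative usage (not in the original source; the interface name is
 * hypothetical): the indirection table handled above is what userspace
 * reaches through ethtool, e.g.
 *
 *      ethtool -x eth0             (dump the RSS indirection table)
 *      ethtool -X eth0 equal 4     (spread flows evenly over 4 RX rings)
 */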
12639
12640 static void tg3_get_channels(struct net_device *dev,
12641                              struct ethtool_channels *channel)
12642 {
12643         struct tg3 *tp = netdev_priv(dev);
12644         u32 deflt_qs = netif_get_num_default_rss_queues();
12645
12646         channel->max_rx = tp->rxq_max;
12647         channel->max_tx = tp->txq_max;
12648
12649         if (netif_running(dev)) {
12650                 channel->rx_count = tp->rxq_cnt;
12651                 channel->tx_count = tp->txq_cnt;
12652         } else {
12653                 if (tp->rxq_req)
12654                         channel->rx_count = tp->rxq_req;
12655                 else
12656                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12657
12658                 if (tp->txq_req)
12659                         channel->tx_count = tp->txq_req;
12660                 else
12661                         channel->tx_count = min(deflt_qs, tp->txq_max);
12662         }
12663 }
12664
12665 static int tg3_set_channels(struct net_device *dev,
12666                             struct ethtool_channels *channel)
12667 {
12668         struct tg3 *tp = netdev_priv(dev);
12669
12670         if (!tg3_flag(tp, SUPPORT_MSIX))
12671                 return -EOPNOTSUPP;
12672
12673         if (channel->rx_count > tp->rxq_max ||
12674             channel->tx_count > tp->txq_max)
12675                 return -EINVAL;
12676
12677         tp->rxq_req = channel->rx_count;
12678         tp->txq_req = channel->tx_count;
12679
12680         if (!netif_running(dev))
12681                 return 0;
12682
12683         tg3_stop(tp);
12684
12685         tg3_carrier_off(tp);
12686
12687         tg3_start(tp, true, false, false);
12688
12689         return 0;
12690 }
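/* Illustrative usage (not in the original source; the interface name is
 * hypothetical): channel counts are driven from userspace via ethtool,
 * e.g. "ethtool -l eth0" to query and "ethtool -L eth0 rx 4 tx 2" to
 * request four RX and two TX rings; on a running interface this takes
 * the tg3_stop()/tg3_start() path above.
 */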
12691
12692 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12693 {
12694         switch (stringset) {
12695         case ETH_SS_STATS:
12696                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12697                 break;
12698         case ETH_SS_TEST:
12699                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12700                 break;
12701         default:
12702                 WARN_ON(1);     /* we need a WARN() */
12703                 break;
12704         }
12705 }
12706
12707 static int tg3_set_phys_id(struct net_device *dev,
12708                             enum ethtool_phys_id_state state)
12709 {
12710         struct tg3 *tp = netdev_priv(dev);
12711
12712         if (!netif_running(tp->dev))
12713                 return -EAGAIN;
12714
12715         switch (state) {
12716         case ETHTOOL_ID_ACTIVE:
12717                 return 1;       /* cycle on/off once per second */
12718
12719         case ETHTOOL_ID_ON:
12720                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12721                      LED_CTRL_1000MBPS_ON |
12722                      LED_CTRL_100MBPS_ON |
12723                      LED_CTRL_10MBPS_ON |
12724                      LED_CTRL_TRAFFIC_OVERRIDE |
12725                      LED_CTRL_TRAFFIC_BLINK |
12726                      LED_CTRL_TRAFFIC_LED);
12727                 break;
12728
12729         case ETHTOOL_ID_OFF:
12730                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12731                      LED_CTRL_TRAFFIC_OVERRIDE);
12732                 break;
12733
12734         case ETHTOOL_ID_INACTIVE:
12735                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12736                 break;
12737         }
12738
12739         return 0;
12740 }
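/* Illustrative usage (not in the original source; the interface name is
 * hypothetical): "ethtool -p eth0 5" blinks the port LED for five
 * seconds; returning 1 for ETHTOOL_ID_ACTIVE above asks the ethtool core
 * to call back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per second.
 */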
12741
12742 static void tg3_get_ethtool_stats(struct net_device *dev,
12743                                    struct ethtool_stats *estats, u64 *tmp_stats)
12744 {
12745         struct tg3 *tp = netdev_priv(dev);
12746
12747         if (tp->hw_stats)
12748                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12749         else
12750                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12751 }
12752
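/* Read the VPD block out of NVRAM.  On standard EEPROM images the NVRAM
 * directory may point at an extended VPD region; otherwise the legacy
 * offset and length are used.  Without a usable NVRAM image the data is
 * fetched through the PCI VPD capability instead.  Returns a kmalloc()ed
 * buffer (freed by the caller) and its length via *vpdlen.
 */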
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
        int i;
        __be32 *buf;
        u32 offset = 0, len = 0;
        u32 magic, val;

        if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                for (offset = TG3_NVM_DIR_START;
                     offset < TG3_NVM_DIR_END;
                     offset += TG3_NVM_DIRENT_SIZE) {
                        if (tg3_nvram_read(tp, offset, &val))
                                return NULL;

                        if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
                            TG3_NVM_DIRTYPE_EXTVPD)
                                break;
                }

                if (offset != TG3_NVM_DIR_END) {
                        len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
                        if (tg3_nvram_read(tp, offset + 4, &offset))
                                return NULL;

                        offset = tg3_nvram_logical_addr(tp, offset);
                }
        }

        if (!offset || !len) {
                offset = TG3_NVM_VPD_OFF;
                len = TG3_NVM_VPD_LEN;
        }

        buf = kmalloc(len, GFP_KERNEL);
        if (buf == NULL)
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                for (i = 0; i < len; i += 4) {
                        /* The data is in little-endian format in NVRAM.
                         * Use the big-endian read routines to preserve
                         * the byte order as it exists in NVRAM.
                         */
                        if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
                                goto error;
                }
        } else {
                u8 *ptr;
                ssize_t cnt;
                unsigned int pos = 0;

                ptr = (u8 *)&buf[0];
                for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
                        cnt = pci_read_vpd(tp->pdev, pos,
                                           len - pos, ptr);
                        if (cnt == -ETIMEDOUT || cnt == -EINTR)
                                cnt = 0;
                        else if (cnt < 0)
                                goto error;
                }
                if (pos != len)
                        goto error;
        }

        *vpdlen = len;

        return buf;

error:
        kfree(buf);
        return NULL;
}

#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

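/* ethtool self-test: NVRAM.  Three image formats are verified: standard
 * EEPROM images by CRC over the bootstrap and manufacturing blocks,
 * format-1 selfboot images by an 8-bit checksum (rev 2 skips the MBA
 * word), and hardware selfboot images by per-byte parity bits.  The
 * checksum keyword in the read-only VPD section, when present, is
 * verified as well.
 */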
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic, len;
        __be32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_flag(tp, NO_NVRAM))
                return 0;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return -EIO;

        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_4:
                                size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_5:
                                size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_6:
                                size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
                                break;
                        default:
                                return -EIO;
                        }
                } else
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                err = tg3_nvram_read_be32(tp, i, &buf[j]);
                if (err)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = be32_to_cpu(buf[0]);
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        } else if (i == 16) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        err = -EIO;

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if (csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;

        kfree(buf);

        buf = tg3_vpd_readblock(tp, &len);
        if (!buf)
                return -ENOMEM;

        i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
        if (i > 0) {
                j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
                if (j < 0)
                        goto out;

                if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
                        goto out;

                i += PCI_VPD_LRDT_TAG_SIZE;
                j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
                                              PCI_VPD_RO_KEYWORD_CHKSUM);
                if (j > 0) {
                        u8 csum8 = 0;

                        j += PCI_VPD_INFO_FLD_HDR_SIZE;

                        for (i = 0; i <= j; i++)
                                csum8 += ((u8 *)buf)[i];

                        if (csum8)
                                goto out;
                }
        }

        err = 0;

out:
        kfree(buf);
        return err;
}

#define TG3_SERDES_TIMEOUT_SEC  2
#define TG3_COPPER_TIMEOUT_SEC  6

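/* ethtool self-test: link.  Poll the cached link state once per second,
 * giving serdes devices two seconds and copper devices six to come up.
 */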
static int tg3_test_link(struct tg3 *tp)
{
        int i, max;

        if (!netif_running(tp->dev))
                return -ENODEV;

        if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                max = TG3_SERDES_TIMEOUT_SEC;
        else
                max = TG3_COPPER_TIMEOUT_SEC;

        for (i = 0; i < max; i++) {
                if (tp->link_up)
                        return 0;

                if (msleep_interruptible(1000))
                        break;
        }

        return -EIO;
}

/* Only test the commonly used registers.  For each entry, read_mask
 * selects the read-only bits (expected to keep their value) and
 * write_mask the read/write bits (expected to accept both 0 and 1).
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
                if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                netdev_err(tp->dev,
                           "Register test failed at offset %x\n", offset);
        tw32(offset, save_val);
        return -EIO;
}

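/* Write each test pattern to every word in [offset, offset + len) of
 * internal chip memory and read it back for comparison.
 */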
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
        static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
        int i;
        u32 j;

        for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
                for (j = 0; j < len; j += 4) {
                        u32 val;

                        tg3_write_mem(tp, offset + j, test_pattern[i]);
                        tg3_read_mem(tp, offset + j, &val);
                        if (val != test_pattern[i])
                                return -EIO;
                }
        }
        return 0;
}

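/* ethtool self-test: memory.  Select the internal-memory map matching
 * the ASIC generation (each table lists { offset, len } regions,
 * terminated by offset 0xffffffff) and pattern-test every region.
 */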
static int tg3_test_memory(struct tg3 *tp)
{
        static struct mem_entry {
                u32 offset;
                u32 len;
        } mem_tbl_570x[] = {
                { 0x00000000, 0x00b50},
                { 0x00002000, 0x1c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5705[] = {
                { 0x00000100, 0x0000c},
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x01000},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0e000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5755[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x00800},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5906[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00400},
                { 0x00006000, 0x00400},
                { 0x00008000, 0x01000},
                { 0x00010000, 0x01000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5717[] = {
                { 0x00000200, 0x00008},
                { 0x00010000, 0x0a000},
                { 0x00020000, 0x13c00},
                { 0xffffffff, 0x00000}
        }, mem_tbl_57765[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x09800},
                { 0x00010000, 0x0a000},
                { 0xffffffff, 0x00000}
        };
        struct mem_entry *mem_tbl;
        int err = 0;
        int i;

        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (tg3_flag(tp, 57765_CLASS) ||
                 tg3_asic_rev(tp) == ASIC_REV_5762)
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
        else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
        else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
        else
                mem_tbl = mem_tbl_570x;

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
                if (err)
                        break;
        }

        return err;
}

#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

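/* Canned frame template for the TSO loopback test: ethertype 0x0800
 * followed by a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP)
 * and a TCP header carrying a 12-byte timestamp option.  The IP total
 * length and TCP checksum fields are patched up at run time.
 */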
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};

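/* Run one loopback iteration: build a pktsz-byte test frame (optionally
 * a multi-segment TSO frame based on the template above), post it on the
 * TX ring, force a coalescing event, and poll for the TX consumer and RX
 * producer indices to advance.  The received buffer is then verified
 * byte-for-byte against the transmitted pattern.
 */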
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
        u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
        u32 budget;
        struct sk_buff *skb;
        u8 *tx_data, *rx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;
        struct tg3_napi *tnapi, *rnapi;
        struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

        tnapi = &tp->napi[0];
        rnapi = &tp->napi[0];
        if (tp->irq_cnt > 1) {
                if (tg3_flag(tp, ENABLE_RSS))
                        rnapi = &tp->napi[1];
                if (tg3_flag(tp, ENABLE_TSS))
                        tnapi = &tp->napi[1];
        }
        coal_now = tnapi->coal_now | rnapi->coal_now;

        err = -EIO;

        tx_len = pktsz;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
        memset(tx_data + ETH_ALEN, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

        if (tso_loopback) {
                struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

                u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
                              TG3_TSO_TCP_OPT_LEN;

                memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
                       sizeof(tg3_tso_header));
                mss = TG3_TSO_MSS;

                val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
                num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

                /* Set the total length field in the IP header */
                iph->tot_len = htons((u16)(mss + hdr_len));

                base_flags = (TXD_FLAG_CPU_PRE_DMA |
                              TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        struct tcphdr *th;
                        val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
                        th = (struct tcphdr *)&tx_data[val];
                        th->check = 0;
                } else
                        base_flags |= TXD_FLAG_TCPUDP_CSUM;

                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         tg3_asic_rev(tp) == ASIC_REV_5705) {
                        mss |= (TG3_TSO_TCP_OPT_LEN << 9);
                } else {
                        base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
                }

                data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
        } else {
                num_pkts = 1;
                data_off = ETH_HLEN;

                if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
                    tx_len > VLAN_ETH_FRAME_LEN)
                        base_flags |= TXD_FLAG_JMB_PKT;
        }

        for (i = data_off; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        val = tnapi->tx_prod;
        tnapi->tx_buffers[val].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               rnapi->coal_now);

        udelay(10);

        rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

        budget = tg3_tx_avail(tnapi);
        if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
                            base_flags | TXD_FLAG_END, mss, 0)) {
                tnapi->tx_buffers[val].skb = NULL;
                dev_kfree_skb(skb);
                return -EIO;
        }

        tnapi->tx_prod++;

        /* Sync BD data before updating mailbox */
        wmb();

        tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
        tr32_mailbox(tnapi->prodmbox);

        udelay(10);

        /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 35; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       coal_now);

                udelay(10);

                tx_idx = tnapi->hw_status->idx[0].tx_consumer;
                rx_idx = rnapi->hw_status->idx[0].rx_producer;
                if ((tx_idx == tnapi->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
        dev_kfree_skb(skb);

        if (tx_idx != tnapi->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        val = data_off;
        while (rx_idx != rx_start_idx) {
                desc = &rnapi->rx_rcb[rx_start_idx++];
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                        goto out;

                rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
                         - ETH_FCS_LEN;

                if (!tso_loopback) {
                        if (rx_len != tx_len)
                                goto out;

                        if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
                                if (opaque_key != RXD_OPAQUE_RING_STD)
                                        goto out;
                        } else {
                                if (opaque_key != RXD_OPAQUE_RING_JUMBO)
                                        goto out;
                        }
                } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                           (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                            >> RXD_TCPCSUM_SHIFT != 0xffff) {
                        goto out;
                }

                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        rx_data = tpr->rx_std_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
                                             mapping);
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        rx_data = tpr->rx_jmb_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
                                             mapping);
                } else
                        goto out;

                pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
                                            PCI_DMA_FROMDEVICE);

                rx_data += TG3_RX_OFFSET(tp);
                for (i = data_off; i < rx_len; i++, val++) {
                        if (*(rx_data + i) != (u8) (val & 0xff))
                                goto out;
                }
        }

        err = 0;

        /* tg3_free_rings will unmap and free the rx_data */
out:
        return err;
}

#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)

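/* ethtool self-test: loopback.  Runs standard, TSO and jumbo frame sizes
 * through MAC-internal loopback (skipped on 5780 and CPMU-based parts,
 * see the errata note below), PHY-internal loopback, and optionally
 * external loopback.  EEE is masked off for the duration, and with RSS
 * enabled all rx traffic is steered to the first return ring.
 */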
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
        int err = -EIO;
        u32 eee_cap;
        u32 jmb_pkt_sz = 9000;

        if (tp->dma_limit)
                jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

        eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
        tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

        if (!netif_running(tp->dev)) {
                data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        err = tg3_reset_hw(tp, true);
        if (err) {
                data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        if (tg3_flag(tp, ENABLE_RSS)) {
                int i;

                /* Reroute all rx packets to the 1st queue */
                for (i = MAC_RSS_INDIR_TBL_0;
                     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
                        tw32(i, 0x0);
        }

        /* HW errata - mac loopback fails in some cases on 5780.
         * Normal traffic and PHY loopback are not affected by
         * errata.  Also, the MAC loopback test is deprecated for
         * all newer ASIC revisions.
         */
        if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT)) {
                tg3_mac_loopback(tp, true);

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                        data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

                tg3_mac_loopback(tp, false);
        }

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            !tg3_flag(tp, USE_PHYLIB)) {
                int i;

                tg3_phy_lpbk_set(tp, 0, false);

                /* Wait for link */
                for (i = 0; i < 100; i++) {
                        if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                break;
                        mdelay(1);
                }

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
                if (tg3_flag(tp, TSO_CAPABLE) &&
                    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                        data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                        data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

                if (do_extlpbk) {
                        tg3_phy_lpbk_set(tp, 0, true);

                        /* All link indications report up, but the hardware
                         * isn't really ready for about 20 msec.  Double it
                         * to be sure.
                         */
                        mdelay(40);

                        if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                                data[TG3_EXT_LOOPB_TEST] |=
                                                        TG3_STD_LOOPBACK_FAILED;
                        if (tg3_flag(tp, TSO_CAPABLE) &&
                            tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                                data[TG3_EXT_LOOPB_TEST] |=
                                                        TG3_TSO_LOOPBACK_FAILED;
                        if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                            tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
                                data[TG3_EXT_LOOPB_TEST] |=
                                                        TG3_JMB_LOOPBACK_FAILED;
                }

                /* Re-enable gphy autopowerdown. */
                if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                        tg3_phy_toggle_apd(tp, true);
        }

        err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
               data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
        tp->phy_flags |= eee_cap;

        return err;
}

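/* ethtool self-test entry point.  The NVRAM and link tests run online;
 * when ETH_TEST_FL_OFFLINE is set the device is halted so the register,
 * memory, loopback and interrupt tests can run, and the hardware is
 * restarted afterwards if the interface was up.
 */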
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                if (tg3_power_up(tp)) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
                        return;
                }
                tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        }

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[TG3_NVRAM_TEST] = 1;
        }
        if (!doextlpbk && tg3_test_link(tp)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[TG3_LINK_TEST] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, err2 = 0, irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_phy_stop(tp);
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!tg3_flag(tp, 5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[TG3_REGISTER_TEST] = 1;
                }

                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[TG3_MEMORY_TEST] = 1;
                }

                if (doextlpbk)
                        etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

                if (tg3_test_loopback(tp, data, doextlpbk))
                        etest->flags |= ETH_TEST_FL_FAILED;

                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[TG3_INTERRUPT_TEST] = 1;
                }

                tg3_full_lock(tp, 0);

                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tg3_flag_set(tp, INIT_COMPLETE);
                        err2 = tg3_restart_hw(tp, true);
                        if (!err2)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);

                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                tg3_power_down_prepare(tp);
}

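/* SIOCSHWTSTAMP handler: map the requested hwtstamp_config rx_filter
 * onto the TG3_RX_PTP_CTL enable bits, program the filter if the device
 * is running, and latch the tx timestamp enable flag.
 */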
static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
        struct tg3 *tp = netdev_priv(dev);
        struct hwtstamp_config stmpconf;

        if (!tg3_flag(tp, PTP_CAPABLE))
                return -EOPNOTSUPP;

        if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
                return -EFAULT;

        if (stmpconf.flags)
                return -EINVAL;

        if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
            stmpconf.tx_type != HWTSTAMP_TX_OFF)
                return -ERANGE;

        switch (stmpconf.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tp->rxptpctl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
                               TG3_RX_PTP_CTL_ALL_V1_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
                               TG3_RX_PTP_CTL_ALL_V2_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
                               TG3_RX_PTP_CTL_ALL_V2_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
                               TG3_RX_PTP_CTL_ALL_V2_EVENTS;
                break;
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
                               TG3_RX_PTP_CTL_SYNC_EVNT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
                               TG3_RX_PTP_CTL_DELAY_REQ;
                break;
        default:
                return -ERANGE;
        }

        if (netif_running(dev) && tp->rxptpctl)
                tw32(TG3_RX_PTP_CTL,
                     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

        if (stmpconf.tx_type == HWTSTAMP_TX_ON)
                tg3_flag_set(tp, TX_TSTAMP_EN);
        else
                tg3_flag_clear(tp, TX_TSTAMP_EN);

        return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
                -EFAULT : 0;
}

static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
        struct tg3 *tp = netdev_priv(dev);
        struct hwtstamp_config stmpconf;

        if (!tg3_flag(tp, PTP_CAPABLE))
                return -EOPNOTSUPP;

        stmpconf.flags = 0;
        stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
                            HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

        switch (tp->rxptpctl) {
        case 0:
                stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
                break;
        case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
                stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                break;
        default:
                WARN_ON_ONCE(1);
                return -ERANGE;
        }

        return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
                -EFAULT : 0;
}

static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
                return phy_mii_ioctl(phydev, ifr, cmd);
        }

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = tp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = __tg3_readphy(tp, data->phy_id & 0x1f,
                                    data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = __tg3_writephy(tp, data->phy_id & 0x1f,
                                     data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);

                return err;

        case SIOCSHWTSTAMP:
                return tg3_hwtstamp_set(dev, ifr);

        case SIOCGHWTSTAMP:
                return tg3_hwtstamp_get(dev, ifr);

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}

static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct tg3 *tp = netdev_priv(dev);

        memcpy(ec, &tp->coal, sizeof(*ec));
        return 0;
}

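/* Validate and apply interrupt coalescing parameters.  5705 and newer
 * parts lack the per-irq tick and statistics coalescing registers, so
 * their limits stay zero and nonzero requests are rejected.
 */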
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
        u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

        if (!tg3_flag(tp, 5705_PLUS)) {
                max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
                max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
                max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
                min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
        }

        if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
            (!ec->rx_coalesce_usecs) ||
            (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
            (!ec->tx_coalesce_usecs) ||
            (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
            (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
            (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
            (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
            (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
            (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
            (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
            (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
                return -EINVAL;

        /* Only copy relevant parameters, ignore all others. */
        tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
        tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
        tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
        tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
        tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
        tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
        tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
        tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
        tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_coalesce(tp, &tp->coal);
                tg3_full_unlock(tp);
        }
        return 0;
}

static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
                netdev_warn(tp->dev, "Board does not support EEE!\n");
                return -EOPNOTSUPP;
        }

        if (edata->advertised != tp->eee.advertised) {
                netdev_warn(tp->dev,
                            "Direct manipulation of EEE advertisement is not supported\n");
                return -EINVAL;
        }

        if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
                netdev_warn(tp->dev,
14081                             "Maximum supported Tx LPI timer is %#x\n",
14082                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14083                 return -EINVAL;
14084         }
14085
14086         tp->eee = *edata;
14087
14088         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14089         tg3_warn_mgmt_link_flap(tp);
14090
14091         if (netif_running(tp->dev)) {
14092                 tg3_full_lock(tp, 0);
14093                 tg3_setup_eee(tp);
14094                 tg3_phy_reset(tp);
14095                 tg3_full_unlock(tp);
14096         }
14097
14098         return 0;
14099 }
14100
14101 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14102 {
14103         struct tg3 *tp = netdev_priv(dev);
14104
14105         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14106                 netdev_warn(tp->dev,
14107                             "Board does not support EEE!\n");
14108                 return -EOPNOTSUPP;
14109         }
14110
14111         *edata = tp->eee;
14112         return 0;
14113 }
14114
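      /* Illustrative sketch (hypothetical, not compiled): the
       * read-modify-write pattern tg3_set_eee() expects.  The advertised
       * mask must be passed back unchanged or -EINVAL is returned; only
       * the Tx LPI timer (capped at TG3_CPMU_DBTMR1_LNKIDLE_MAX) is meant
       * to be adjusted.  The timer value is made up.
       */
      #if 0
      static int tg3_eee_example(struct net_device *dev)
      {
              struct ethtool_eee edata;
              int err;

              err = tg3_get_eee(dev, &edata);
              if (err)
                      return err;

              /* Keep edata.advertised exactly as read. */
              edata.tx_lpi_timer = 0x1000;

              return tg3_set_eee(dev, &edata);
      }
      #endif
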
14115 static const struct ethtool_ops tg3_ethtool_ops = {
14116         .get_drvinfo            = tg3_get_drvinfo,
14117         .get_regs_len           = tg3_get_regs_len,
14118         .get_regs               = tg3_get_regs,
14119         .get_wol                = tg3_get_wol,
14120         .set_wol                = tg3_set_wol,
14121         .get_msglevel           = tg3_get_msglevel,
14122         .set_msglevel           = tg3_set_msglevel,
14123         .nway_reset             = tg3_nway_reset,
14124         .get_link               = ethtool_op_get_link,
14125         .get_eeprom_len         = tg3_get_eeprom_len,
14126         .get_eeprom             = tg3_get_eeprom,
14127         .set_eeprom             = tg3_set_eeprom,
14128         .get_ringparam          = tg3_get_ringparam,
14129         .set_ringparam          = tg3_set_ringparam,
14130         .get_pauseparam         = tg3_get_pauseparam,
14131         .set_pauseparam         = tg3_set_pauseparam,
14132         .self_test              = tg3_self_test,
14133         .get_strings            = tg3_get_strings,
14134         .set_phys_id            = tg3_set_phys_id,
14135         .get_ethtool_stats      = tg3_get_ethtool_stats,
14136         .get_coalesce           = tg3_get_coalesce,
14137         .set_coalesce           = tg3_set_coalesce,
14138         .get_sset_count         = tg3_get_sset_count,
14139         .get_rxnfc              = tg3_get_rxnfc,
14140         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14141         .get_rxfh               = tg3_get_rxfh,
14142         .set_rxfh               = tg3_set_rxfh,
14143         .get_channels           = tg3_get_channels,
14144         .set_channels           = tg3_set_channels,
14145         .get_ts_info            = tg3_get_ts_info,
14146         .get_eee                = tg3_get_eee,
14147         .set_eee                = tg3_set_eee,
14148         .get_link_ksettings     = tg3_get_link_ksettings,
14149         .set_link_ksettings     = tg3_set_link_ksettings,
14150 };
14151
14152 static void tg3_get_stats64(struct net_device *dev,
14153                             struct rtnl_link_stats64 *stats)
14154 {
14155         struct tg3 *tp = netdev_priv(dev);
14156
14157         spin_lock_bh(&tp->lock);
14158         if (!tp->hw_stats) {
14159                 *stats = tp->net_stats_prev;
14160                 spin_unlock_bh(&tp->lock);
14161                 return;
14162         }
14163
14164         tg3_get_nstats(tp, stats);
14165         spin_unlock_bh(&tp->lock);
14166 }
14167
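      /* While the device is down, hw_stats is gone, so the snapshot kept in
       * net_stats_prev is reported instead; counters therefore stay stable
       * and monotonic across an ifdown/ifup cycle.
       */
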
14168 static void tg3_set_rx_mode(struct net_device *dev)
14169 {
14170         struct tg3 *tp = netdev_priv(dev);
14171
14172         if (!netif_running(dev))
14173                 return;
14174
14175         tg3_full_lock(tp, 0);
14176         __tg3_set_rx_mode(dev);
14177         tg3_full_unlock(tp);
14178 }
14179
14180 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14181                                int new_mtu)
14182 {
14183         dev->mtu = new_mtu;
14184
14185         if (new_mtu > ETH_DATA_LEN) {
14186                 if (tg3_flag(tp, 5780_CLASS)) {
14187                         netdev_update_features(dev);
14188                         tg3_flag_clear(tp, TSO_CAPABLE);
14189                 } else {
14190                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14191                 }
14192         } else {
14193                 if (tg3_flag(tp, 5780_CLASS)) {
14194                         tg3_flag_set(tp, TSO_CAPABLE);
14195                         netdev_update_features(dev);
14196                 }
14197                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14198         }
14199 }
14200
14201 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14202 {
14203         struct tg3 *tp = netdev_priv(dev);
14204         int err;
14205         bool reset_phy = false;
14206
14207         if (!netif_running(dev)) {
14208                 /* The MTU change will simply take effect
14209                  * the next time the device is brought up.
14210                  */
14211                 tg3_set_mtu(dev, tp, new_mtu);
14212                 return 0;
14213         }
14214
14215         tg3_phy_stop(tp);
14216
14217         tg3_netif_stop(tp);
14218
14219         tg3_set_mtu(dev, tp, new_mtu);
14220
14221         tg3_full_lock(tp, 1);
14222
14223         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14224
14225         /* Reset the PHY, otherwise the read DMA engine will be left in a
14226          * mode that splits every request into 256-byte chunks.
14227          */
14228         if (tg3_asic_rev(tp) == ASIC_REV_57766)
14229                 reset_phy = true;
14230
14231         err = tg3_restart_hw(tp, reset_phy);
14232
14233         if (!err)
14234                 tg3_netif_start(tp);
14235
14236         tg3_full_unlock(tp);
14237
14238         if (!err)
14239                 tg3_phy_start(tp);
14240
14241         return err;
14242 }
14243
14244 static const struct net_device_ops tg3_netdev_ops = {
14245         .ndo_open               = tg3_open,
14246         .ndo_stop               = tg3_close,
14247         .ndo_start_xmit         = tg3_start_xmit,
14248         .ndo_get_stats64        = tg3_get_stats64,
14249         .ndo_validate_addr      = eth_validate_addr,
14250         .ndo_set_rx_mode        = tg3_set_rx_mode,
14251         .ndo_set_mac_address    = tg3_set_mac_addr,
14252         .ndo_do_ioctl           = tg3_ioctl,
14253         .ndo_tx_timeout         = tg3_tx_timeout,
14254         .ndo_change_mtu         = tg3_change_mtu,
14255         .ndo_fix_features       = tg3_fix_features,
14256         .ndo_set_features       = tg3_set_features,
14257 #ifdef CONFIG_NET_POLL_CONTROLLER
14258         .ndo_poll_controller    = tg3_poll_controller,
14259 #endif
14260 };
14261
14262 static void tg3_get_eeprom_size(struct tg3 *tp)
14263 {
14264         u32 cursize, val, magic;
14265
14266         tp->nvram_size = EEPROM_CHIP_SIZE;
14267
14268         if (tg3_nvram_read(tp, 0, &magic) != 0)
14269                 return;
14270
14271         if ((magic != TG3_EEPROM_MAGIC) &&
14272             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14273             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14274                 return;
14275
14276         /*
14277          * Size the chip by reading offsets at increasing powers of two.
14278          * When we encounter our validation signature, we know the addressing
14279          * has wrapped around, and thus have our chip size.
14280          */
14281         cursize = 0x10;
14282
14283         while (cursize < tp->nvram_size) {
14284                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14285                         return;
14286
14287                 if (val == magic)
14288                         break;
14289
14290                 cursize <<= 1;
14291         }
14292
14293         tp->nvram_size = cursize;
14294 }
14295
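      /* Worked example (hypothetical part): for a 16 KB serial EEPROM with
       * the magic signature at offset 0, the loop above reads offsets 0x10,
       * 0x20, 0x40, ..., none of which return the magic.  At cursize =
       * 0x4000 the addressing wraps back to offset 0, the magic is read
       * back, and nvram_size becomes 0x4000.
       */
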
14296 static void tg3_get_nvram_size(struct tg3 *tp)
14297 {
14298         u32 val;
14299
14300         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14301                 return;
14302
14303         /* Selfboot format */
14304         if (val != TG3_EEPROM_MAGIC) {
14305                 tg3_get_eeprom_size(tp);
14306                 return;
14307         }
14308
14309         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14310                 if (val != 0) {
14311                         /* This is confusing.  We want to operate on the
14312                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14313                          * call will read from NVRAM and byteswap the data
14314                          * according to the byteswapping settings for all
14315                          * other register accesses.  This ensures the data we
14316                          * want will always reside in the lower 16-bits.
14317                          * However, the data in NVRAM is in LE format, which
14318                          * means the data from the NVRAM read will always be
14319                          * opposite the endianness of the CPU.  The 16-bit
14320                          * byteswap then brings the data to CPU endianness.
14321                          */
14322                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14323                         return;
14324                 }
14325         }
14326         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14327 }
14328
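      /* Worked example (hypothetical contents): suppose the 16-bit count at
       * NVRAM offset 0xf2 is 512, i.e. a 512 KB part.  On a little-endian
       * CPU the byteswapped read leaves 0x0002 in the low 16 bits of val;
       * swab16(0x0002) = 0x0200 = 512, and 512 * 1024 gives the final
       * 0x80000-byte nvram_size.
       */
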
14329 static void tg3_get_nvram_info(struct tg3 *tp)
14330 {
14331         u32 nvcfg1;
14332
14333         nvcfg1 = tr32(NVRAM_CFG1);
14334         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14335                 tg3_flag_set(tp, FLASH);
14336         } else {
14337                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14338                 tw32(NVRAM_CFG1, nvcfg1);
14339         }
14340
14341         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14342             tg3_flag(tp, 5780_CLASS)) {
14343                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14344                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14345                         tp->nvram_jedecnum = JEDEC_ATMEL;
14346                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14347                         tg3_flag_set(tp, NVRAM_BUFFERED);
14348                         break;
14349                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14350                         tp->nvram_jedecnum = JEDEC_ATMEL;
14351                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14352                         break;
14353                 case FLASH_VENDOR_ATMEL_EEPROM:
14354                         tp->nvram_jedecnum = JEDEC_ATMEL;
14355                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14356                         tg3_flag_set(tp, NVRAM_BUFFERED);
14357                         break;
14358                 case FLASH_VENDOR_ST:
14359                         tp->nvram_jedecnum = JEDEC_ST;
14360                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14361                         tg3_flag_set(tp, NVRAM_BUFFERED);
14362                         break;
14363                 case FLASH_VENDOR_SAIFUN:
14364                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14365                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14366                         break;
14367                 case FLASH_VENDOR_SST_SMALL:
14368                 case FLASH_VENDOR_SST_LARGE:
14369                         tp->nvram_jedecnum = JEDEC_SST;
14370                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14371                         break;
14372                 }
14373         } else {
14374                 tp->nvram_jedecnum = JEDEC_ATMEL;
14375                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14376                 tg3_flag_set(tp, NVRAM_BUFFERED);
14377         }
14378 }
14379
14380 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14381 {
14382         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14383         case FLASH_5752PAGE_SIZE_256:
14384                 tp->nvram_pagesize = 256;
14385                 break;
14386         case FLASH_5752PAGE_SIZE_512:
14387                 tp->nvram_pagesize = 512;
14388                 break;
14389         case FLASH_5752PAGE_SIZE_1K:
14390                 tp->nvram_pagesize = 1024;
14391                 break;
14392         case FLASH_5752PAGE_SIZE_2K:
14393                 tp->nvram_pagesize = 2048;
14394                 break;
14395         case FLASH_5752PAGE_SIZE_4K:
14396                 tp->nvram_pagesize = 4096;
14397                 break;
14398         case FLASH_5752PAGE_SIZE_264:
14399                 tp->nvram_pagesize = 264;
14400                 break;
14401         case FLASH_5752PAGE_SIZE_528:
14402                 tp->nvram_pagesize = 528;
14403                 break;
14404         }
14405 }
14406
14407 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14408 {
14409         u32 nvcfg1;
14410
14411         nvcfg1 = tr32(NVRAM_CFG1);
14412
14413         /* NVRAM protection for TPM */
14414         if (nvcfg1 & (1 << 27))
14415                 tg3_flag_set(tp, PROTECTED_NVRAM);
14416
14417         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14418         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14419         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14420                 tp->nvram_jedecnum = JEDEC_ATMEL;
14421                 tg3_flag_set(tp, NVRAM_BUFFERED);
14422                 break;
14423         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14424                 tp->nvram_jedecnum = JEDEC_ATMEL;
14425                 tg3_flag_set(tp, NVRAM_BUFFERED);
14426                 tg3_flag_set(tp, FLASH);
14427                 break;
14428         case FLASH_5752VENDOR_ST_M45PE10:
14429         case FLASH_5752VENDOR_ST_M45PE20:
14430         case FLASH_5752VENDOR_ST_M45PE40:
14431                 tp->nvram_jedecnum = JEDEC_ST;
14432                 tg3_flag_set(tp, NVRAM_BUFFERED);
14433                 tg3_flag_set(tp, FLASH);
14434                 break;
14435         }
14436
14437         if (tg3_flag(tp, FLASH)) {
14438                 tg3_nvram_get_pagesize(tp, nvcfg1);
14439         } else {
14440                 /* For eeprom, set pagesize to maximum eeprom size */
14441                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14442
14443                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14444                 tw32(NVRAM_CFG1, nvcfg1);
14445         }
14446 }
14447
14448 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14449 {
14450         u32 nvcfg1, protect = 0;
14451
14452         nvcfg1 = tr32(NVRAM_CFG1);
14453
14454         /* NVRAM protection for TPM */
14455         if (nvcfg1 & (1 << 27)) {
14456                 tg3_flag_set(tp, PROTECTED_NVRAM);
14457                 protect = 1;
14458         }
14459
14460         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14461         switch (nvcfg1) {
14462         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14463         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14464         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14465         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14466                 tp->nvram_jedecnum = JEDEC_ATMEL;
14467                 tg3_flag_set(tp, NVRAM_BUFFERED);
14468                 tg3_flag_set(tp, FLASH);
14469                 tp->nvram_pagesize = 264;
14470                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14471                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14472                         tp->nvram_size = (protect ? 0x3e200 :
14473                                           TG3_NVRAM_SIZE_512KB);
14474                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14475                         tp->nvram_size = (protect ? 0x1f200 :
14476                                           TG3_NVRAM_SIZE_256KB);
14477                 else
14478                         tp->nvram_size = (protect ? 0x1f200 :
14479                                           TG3_NVRAM_SIZE_128KB);
14480                 break;
14481         case FLASH_5752VENDOR_ST_M45PE10:
14482         case FLASH_5752VENDOR_ST_M45PE20:
14483         case FLASH_5752VENDOR_ST_M45PE40:
14484                 tp->nvram_jedecnum = JEDEC_ST;
14485                 tg3_flag_set(tp, NVRAM_BUFFERED);
14486                 tg3_flag_set(tp, FLASH);
14487                 tp->nvram_pagesize = 256;
14488                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14489                         tp->nvram_size = (protect ?
14490                                           TG3_NVRAM_SIZE_64KB :
14491                                           TG3_NVRAM_SIZE_128KB);
14492                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14493                         tp->nvram_size = (protect ?
14494                                           TG3_NVRAM_SIZE_64KB :
14495                                           TG3_NVRAM_SIZE_256KB);
14496                 else
14497                         tp->nvram_size = (protect ?
14498                                           TG3_NVRAM_SIZE_128KB :
14499                                           TG3_NVRAM_SIZE_512KB);
14500                 break;
14501         }
14502 }
14503
14504 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14505 {
14506         u32 nvcfg1;
14507
14508         nvcfg1 = tr32(NVRAM_CFG1);
14509
14510         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14511         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14512         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14513         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14514         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14515                 tp->nvram_jedecnum = JEDEC_ATMEL;
14516                 tg3_flag_set(tp, NVRAM_BUFFERED);
14517                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14518
14519                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14520                 tw32(NVRAM_CFG1, nvcfg1);
14521                 break;
14522         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14523         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14524         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14525         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14526                 tp->nvram_jedecnum = JEDEC_ATMEL;
14527                 tg3_flag_set(tp, NVRAM_BUFFERED);
14528                 tg3_flag_set(tp, FLASH);
14529                 tp->nvram_pagesize = 264;
14530                 break;
14531         case FLASH_5752VENDOR_ST_M45PE10:
14532         case FLASH_5752VENDOR_ST_M45PE20:
14533         case FLASH_5752VENDOR_ST_M45PE40:
14534                 tp->nvram_jedecnum = JEDEC_ST;
14535                 tg3_flag_set(tp, NVRAM_BUFFERED);
14536                 tg3_flag_set(tp, FLASH);
14537                 tp->nvram_pagesize = 256;
14538                 break;
14539         }
14540 }
14541
14542 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14543 {
14544         u32 nvcfg1, protect = 0;
14545
14546         nvcfg1 = tr32(NVRAM_CFG1);
14547
14548         /* NVRAM protection for TPM */
14549         if (nvcfg1 & (1 << 27)) {
14550                 tg3_flag_set(tp, PROTECTED_NVRAM);
14551                 protect = 1;
14552         }
14553
14554         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14555         switch (nvcfg1) {
14556         case FLASH_5761VENDOR_ATMEL_ADB021D:
14557         case FLASH_5761VENDOR_ATMEL_ADB041D:
14558         case FLASH_5761VENDOR_ATMEL_ADB081D:
14559         case FLASH_5761VENDOR_ATMEL_ADB161D:
14560         case FLASH_5761VENDOR_ATMEL_MDB021D:
14561         case FLASH_5761VENDOR_ATMEL_MDB041D:
14562         case FLASH_5761VENDOR_ATMEL_MDB081D:
14563         case FLASH_5761VENDOR_ATMEL_MDB161D:
14564                 tp->nvram_jedecnum = JEDEC_ATMEL;
14565                 tg3_flag_set(tp, NVRAM_BUFFERED);
14566                 tg3_flag_set(tp, FLASH);
14567                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14568                 tp->nvram_pagesize = 256;
14569                 break;
14570         case FLASH_5761VENDOR_ST_A_M45PE20:
14571         case FLASH_5761VENDOR_ST_A_M45PE40:
14572         case FLASH_5761VENDOR_ST_A_M45PE80:
14573         case FLASH_5761VENDOR_ST_A_M45PE16:
14574         case FLASH_5761VENDOR_ST_M_M45PE20:
14575         case FLASH_5761VENDOR_ST_M_M45PE40:
14576         case FLASH_5761VENDOR_ST_M_M45PE80:
14577         case FLASH_5761VENDOR_ST_M_M45PE16:
14578                 tp->nvram_jedecnum = JEDEC_ST;
14579                 tg3_flag_set(tp, NVRAM_BUFFERED);
14580                 tg3_flag_set(tp, FLASH);
14581                 tp->nvram_pagesize = 256;
14582                 break;
14583         }
14584
14585         if (protect) {
14586                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14587         } else {
14588                 switch (nvcfg1) {
14589                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14590                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14591                 case FLASH_5761VENDOR_ST_A_M45PE16:
14592                 case FLASH_5761VENDOR_ST_M_M45PE16:
14593                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14594                         break;
14595                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14596                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14597                 case FLASH_5761VENDOR_ST_A_M45PE80:
14598                 case FLASH_5761VENDOR_ST_M_M45PE80:
14599                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14600                         break;
14601                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14602                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14603                 case FLASH_5761VENDOR_ST_A_M45PE40:
14604                 case FLASH_5761VENDOR_ST_M_M45PE40:
14605                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14606                         break;
14607                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14608                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14609                 case FLASH_5761VENDOR_ST_A_M45PE20:
14610                 case FLASH_5761VENDOR_ST_M_M45PE20:
14611                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14612                         break;
14613                 }
14614         }
14615 }
14616
14617 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14618 {
14619         tp->nvram_jedecnum = JEDEC_ATMEL;
14620         tg3_flag_set(tp, NVRAM_BUFFERED);
14621         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14622 }
14623
14624 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14625 {
14626         u32 nvcfg1;
14627
14628         nvcfg1 = tr32(NVRAM_CFG1);
14629
14630         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14631         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14632         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14633                 tp->nvram_jedecnum = JEDEC_ATMEL;
14634                 tg3_flag_set(tp, NVRAM_BUFFERED);
14635                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14636
14637                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14638                 tw32(NVRAM_CFG1, nvcfg1);
14639                 return;
14640         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14641         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14642         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14643         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14644         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14645         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14646         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14647                 tp->nvram_jedecnum = JEDEC_ATMEL;
14648                 tg3_flag_set(tp, NVRAM_BUFFERED);
14649                 tg3_flag_set(tp, FLASH);
14650
14651                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14652                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14653                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14654                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14655                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14656                         break;
14657                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14658                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14659                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14660                         break;
14661                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14662                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14663                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14664                         break;
14665                 }
14666                 break;
14667         case FLASH_5752VENDOR_ST_M45PE10:
14668         case FLASH_5752VENDOR_ST_M45PE20:
14669         case FLASH_5752VENDOR_ST_M45PE40:
14670                 tp->nvram_jedecnum = JEDEC_ST;
14671                 tg3_flag_set(tp, NVRAM_BUFFERED);
14672                 tg3_flag_set(tp, FLASH);
14673
14674                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14675                 case FLASH_5752VENDOR_ST_M45PE10:
14676                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14677                         break;
14678                 case FLASH_5752VENDOR_ST_M45PE20:
14679                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14680                         break;
14681                 case FLASH_5752VENDOR_ST_M45PE40:
14682                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14683                         break;
14684                 }
14685                 break;
14686         default:
14687                 tg3_flag_set(tp, NO_NVRAM);
14688                 return;
14689         }
14690
14691         tg3_nvram_get_pagesize(tp, nvcfg1);
14692         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14693                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14694 }
14695
14697 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14698 {
14699         u32 nvcfg1;
14700
14701         nvcfg1 = tr32(NVRAM_CFG1);
14702
14703         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14704         case FLASH_5717VENDOR_ATMEL_EEPROM:
14705         case FLASH_5717VENDOR_MICRO_EEPROM:
14706                 tp->nvram_jedecnum = JEDEC_ATMEL;
14707                 tg3_flag_set(tp, NVRAM_BUFFERED);
14708                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14709
14710                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14711                 tw32(NVRAM_CFG1, nvcfg1);
14712                 return;
14713         case FLASH_5717VENDOR_ATMEL_MDB011D:
14714         case FLASH_5717VENDOR_ATMEL_ADB011B:
14715         case FLASH_5717VENDOR_ATMEL_ADB011D:
14716         case FLASH_5717VENDOR_ATMEL_MDB021D:
14717         case FLASH_5717VENDOR_ATMEL_ADB021B:
14718         case FLASH_5717VENDOR_ATMEL_ADB021D:
14719         case FLASH_5717VENDOR_ATMEL_45USPT:
14720                 tp->nvram_jedecnum = JEDEC_ATMEL;
14721                 tg3_flag_set(tp, NVRAM_BUFFERED);
14722                 tg3_flag_set(tp, FLASH);
14723
14724                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14725                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14726                         /* Detect size with tg3_get_nvram_size() */
14727                         break;
14728                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14729                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14730                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14731                         break;
14732                 default:
14733                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14734                         break;
14735                 }
14736                 break;
14737         case FLASH_5717VENDOR_ST_M_M25PE10:
14738         case FLASH_5717VENDOR_ST_A_M25PE10:
14739         case FLASH_5717VENDOR_ST_M_M45PE10:
14740         case FLASH_5717VENDOR_ST_A_M45PE10:
14741         case FLASH_5717VENDOR_ST_M_M25PE20:
14742         case FLASH_5717VENDOR_ST_A_M25PE20:
14743         case FLASH_5717VENDOR_ST_M_M45PE20:
14744         case FLASH_5717VENDOR_ST_A_M45PE20:
14745         case FLASH_5717VENDOR_ST_25USPT:
14746         case FLASH_5717VENDOR_ST_45USPT:
14747                 tp->nvram_jedecnum = JEDEC_ST;
14748                 tg3_flag_set(tp, NVRAM_BUFFERED);
14749                 tg3_flag_set(tp, FLASH);
14750
14751                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14752                 case FLASH_5717VENDOR_ST_M_M25PE20:
14753                 case FLASH_5717VENDOR_ST_M_M45PE20:
14754                         /* Detect size with tg3_get_nvram_size() */
14755                         break;
14756                 case FLASH_5717VENDOR_ST_A_M25PE20:
14757                 case FLASH_5717VENDOR_ST_A_M45PE20:
14758                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14759                         break;
14760                 default:
14761                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14762                         break;
14763                 }
14764                 break;
14765         default:
14766                 tg3_flag_set(tp, NO_NVRAM);
14767                 return;
14768         }
14769
14770         tg3_nvram_get_pagesize(tp, nvcfg1);
14771         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14772                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14773 }
14774
14775 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14776 {
14777         u32 nvcfg1, nvmpinstrp;
14778
14779         nvcfg1 = tr32(NVRAM_CFG1);
14780         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14781
14782         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14783                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14784                         tg3_flag_set(tp, NO_NVRAM);
14785                         return;
14786                 }
14787
14788                 switch (nvmpinstrp) {
14789                 case FLASH_5762_EEPROM_HD:
14790                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14791                         break;
14792                 case FLASH_5762_EEPROM_LD:
14793                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14794                         break;
14795                 case FLASH_5720VENDOR_M_ST_M45PE20:
14796                         /* This pinstrap supports multiple sizes, so force it
14797                          * to read the actual size from location 0xf0.
14798                          */
14799                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14800                         break;
14801                 }
14802         }
14803
14804         switch (nvmpinstrp) {
14805         case FLASH_5720_EEPROM_HD:
14806         case FLASH_5720_EEPROM_LD:
14807                 tp->nvram_jedecnum = JEDEC_ATMEL;
14808                 tg3_flag_set(tp, NVRAM_BUFFERED);
14809
14810                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14811                 tw32(NVRAM_CFG1, nvcfg1);
14812                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14813                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14814                 else
14815                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14816                 return;
14817         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14818         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14819         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14820         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14821         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14822         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14823         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14824         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14825         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14826         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14827         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14828         case FLASH_5720VENDOR_ATMEL_45USPT:
14829                 tp->nvram_jedecnum = JEDEC_ATMEL;
14830                 tg3_flag_set(tp, NVRAM_BUFFERED);
14831                 tg3_flag_set(tp, FLASH);
14832
14833                 switch (nvmpinstrp) {
14834                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14835                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14836                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14837                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14838                         break;
14839                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14840                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14841                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14842                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14843                         break;
14844                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14845                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14846                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14847                         break;
14848                 default:
14849                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14850                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14851                         break;
14852                 }
14853                 break;
14854         case FLASH_5720VENDOR_M_ST_M25PE10:
14855         case FLASH_5720VENDOR_M_ST_M45PE10:
14856         case FLASH_5720VENDOR_A_ST_M25PE10:
14857         case FLASH_5720VENDOR_A_ST_M45PE10:
14858         case FLASH_5720VENDOR_M_ST_M25PE20:
14859         case FLASH_5720VENDOR_M_ST_M45PE20:
14860         case FLASH_5720VENDOR_A_ST_M25PE20:
14861         case FLASH_5720VENDOR_A_ST_M45PE20:
14862         case FLASH_5720VENDOR_M_ST_M25PE40:
14863         case FLASH_5720VENDOR_M_ST_M45PE40:
14864         case FLASH_5720VENDOR_A_ST_M25PE40:
14865         case FLASH_5720VENDOR_A_ST_M45PE40:
14866         case FLASH_5720VENDOR_M_ST_M25PE80:
14867         case FLASH_5720VENDOR_M_ST_M45PE80:
14868         case FLASH_5720VENDOR_A_ST_M25PE80:
14869         case FLASH_5720VENDOR_A_ST_M45PE80:
14870         case FLASH_5720VENDOR_ST_25USPT:
14871         case FLASH_5720VENDOR_ST_45USPT:
14872                 tp->nvram_jedecnum = JEDEC_ST;
14873                 tg3_flag_set(tp, NVRAM_BUFFERED);
14874                 tg3_flag_set(tp, FLASH);
14875
14876                 switch (nvmpinstrp) {
14877                 case FLASH_5720VENDOR_M_ST_M25PE20:
14878                 case FLASH_5720VENDOR_M_ST_M45PE20:
14879                 case FLASH_5720VENDOR_A_ST_M25PE20:
14880                 case FLASH_5720VENDOR_A_ST_M45PE20:
14881                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14882                         break;
14883                 case FLASH_5720VENDOR_M_ST_M25PE40:
14884                 case FLASH_5720VENDOR_M_ST_M45PE40:
14885                 case FLASH_5720VENDOR_A_ST_M25PE40:
14886                 case FLASH_5720VENDOR_A_ST_M45PE40:
14887                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14888                         break;
14889                 case FLASH_5720VENDOR_M_ST_M25PE80:
14890                 case FLASH_5720VENDOR_M_ST_M45PE80:
14891                 case FLASH_5720VENDOR_A_ST_M25PE80:
14892                 case FLASH_5720VENDOR_A_ST_M45PE80:
14893                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14894                         break;
14895                 default:
14896                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14897                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14898                         break;
14899                 }
14900                 break;
14901         default:
14902                 tg3_flag_set(tp, NO_NVRAM);
14903                 return;
14904         }
14905
14906         tg3_nvram_get_pagesize(tp, nvcfg1);
14907         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14908                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14909
14910         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14911                 u32 val;
14912
14913                 if (tg3_nvram_read(tp, 0, &val))
14914                         return;
14915
14916                 if (val != TG3_EEPROM_MAGIC &&
14917                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14918                         tg3_flag_set(tp, NO_NVRAM);
14919         }
14920 }
14921
14922 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14923 static void tg3_nvram_init(struct tg3 *tp)
14924 {
14925         if (tg3_flag(tp, IS_SSB_CORE)) {
14926                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14927                 tg3_flag_clear(tp, NVRAM);
14928                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14929                 tg3_flag_set(tp, NO_NVRAM);
14930                 return;
14931         }
14932
14933         tw32_f(GRC_EEPROM_ADDR,
14934              (EEPROM_ADDR_FSM_RESET |
14935               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14936                EEPROM_ADDR_CLKPERD_SHIFT)));
14937
14938         msleep(1);
14939
14940         /* Enable seeprom accesses. */
14941         tw32_f(GRC_LOCAL_CTRL,
14942              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14943         udelay(100);
14944
14945         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14946             tg3_asic_rev(tp) != ASIC_REV_5701) {
14947                 tg3_flag_set(tp, NVRAM);
14948
14949                 if (tg3_nvram_lock(tp)) {
14950                         netdev_warn(tp->dev,
14951                                     "Cannot get nvram lock, %s failed\n",
14952                                     __func__);
14953                         return;
14954                 }
14955                 tg3_enable_nvram_access(tp);
14956
14957                 tp->nvram_size = 0;
14958
14959                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14960                         tg3_get_5752_nvram_info(tp);
14961                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14962                         tg3_get_5755_nvram_info(tp);
14963                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14964                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14965                          tg3_asic_rev(tp) == ASIC_REV_5785)
14966                         tg3_get_5787_nvram_info(tp);
14967                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14968                         tg3_get_5761_nvram_info(tp);
14969                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14970                         tg3_get_5906_nvram_info(tp);
14971                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14972                          tg3_flag(tp, 57765_CLASS))
14973                         tg3_get_57780_nvram_info(tp);
14974                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14975                          tg3_asic_rev(tp) == ASIC_REV_5719)
14976                         tg3_get_5717_nvram_info(tp);
14977                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14978                          tg3_asic_rev(tp) == ASIC_REV_5762)
14979                         tg3_get_5720_nvram_info(tp);
14980                 else
14981                         tg3_get_nvram_info(tp);
14982
14983                 if (tp->nvram_size == 0)
14984                         tg3_get_nvram_size(tp);
14985
14986                 tg3_disable_nvram_access(tp);
14987                 tg3_nvram_unlock(tp);
14988
14989         } else {
14990                 tg3_flag_clear(tp, NVRAM);
14991                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14992
14993                 tg3_get_eeprom_size(tp);
14994         }
14995 }
14996
14997 struct subsys_tbl_ent {
14998         u16 subsys_vendor, subsys_devid;
14999         u32 phy_id;
15000 };
15001
15002 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15003         /* Broadcom boards. */
15004         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15005           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15006         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15007           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15008         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15009           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15010         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15011           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15012         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15013           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15014         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15015           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15016         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15017           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15018         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15019           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15020         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15021           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15022         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15023           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15024         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15025           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15026
15027         /* 3com boards. */
15028         { TG3PCI_SUBVENDOR_ID_3COM,
15029           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15030         { TG3PCI_SUBVENDOR_ID_3COM,
15031           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15032         { TG3PCI_SUBVENDOR_ID_3COM,
15033           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15034         { TG3PCI_SUBVENDOR_ID_3COM,
15035           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15036         { TG3PCI_SUBVENDOR_ID_3COM,
15037           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15038
15039         /* DELL boards. */
15040         { TG3PCI_SUBVENDOR_ID_DELL,
15041           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15042         { TG3PCI_SUBVENDOR_ID_DELL,
15043           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15044         { TG3PCI_SUBVENDOR_ID_DELL,
15045           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15046         { TG3PCI_SUBVENDOR_ID_DELL,
15047           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15048
15049         /* Compaq boards. */
15050         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15051           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15052         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15053           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15054         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15055           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15056         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15057           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15058         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15059           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15060
15061         /* IBM boards. */
15062         { TG3PCI_SUBVENDOR_ID_IBM,
15063           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15064 };
15065
15066 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15067 {
15068         int i;
15069
15070         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15071                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15072                      tp->pdev->subsystem_vendor) &&
15073                     (subsys_id_to_phy_id[i].subsys_devid ==
15074                      tp->pdev->subsystem_device))
15075                         return &subsys_id_to_phy_id[i];
15076         }
15077         return NULL;
15078 }
15079
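      /* Illustrative sketch (hypothetical, not compiled): how the lookup
       * above is typically consumed when the PHY ID cannot be read from
       * the chip.  A nonzero phy_id pins the PHY type for a known board.
       */
      #if 0
      static void tg3_subsys_lookup_example(struct tg3 *tp)
      {
              struct subsys_tbl_ent *ent = tg3_lookup_by_subsys(tp);

              if (ent) {
                      tp->phy_id = ent->phy_id;
                      /* A zero phy_id marks a fiber board with no copper PHY. */
                      if (!ent->phy_id)
                              tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
              }
      }
      #endif
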
15080 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15081 {
15082         u32 val;
15083
15084         tp->phy_id = TG3_PHY_ID_INVALID;
15085         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15086
15087         /* Assume an onboard, WOL-capable device by default. */
15088         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15089         tg3_flag_set(tp, WOL_CAP);
15090
15091         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15092                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15093                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15094                         tg3_flag_set(tp, IS_NIC);
15095                 }
15096                 val = tr32(VCPU_CFGSHDW);
15097                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15098                         tg3_flag_set(tp, ASPM_WORKAROUND);
15099                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15100                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15101                         tg3_flag_set(tp, WOL_ENABLE);
15102                         device_set_wakeup_enable(&tp->pdev->dev, true);
15103                 }
15104                 goto done;
15105         }
15106
15107         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15108         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15109                 u32 nic_cfg, led_cfg;
15110                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15111                 u32 nic_phy_id, ver, eeprom_phy_id;
15112                 int eeprom_phy_serdes = 0;
15113
15114                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15115                 tp->nic_sram_data_cfg = nic_cfg;
15116
15117                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15118                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15119                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15120                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15121                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15122                     (ver > 0) && (ver < 0x100))
15123                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15124
15125                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15126                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15127
15128                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15129                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15130                     tg3_asic_rev(tp) == ASIC_REV_5720)
15131                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15132
15133                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15134                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15135                         eeprom_phy_serdes = 1;
15136
15137                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15138                 if (nic_phy_id != 0) {
15139                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15140                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15141
15142                         eeprom_phy_id  = (id1 >> 16) << 10;
15143                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15144                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15145                 } else
15146                         eeprom_phy_id = 0;
15147
15148                 tp->phy_id = eeprom_phy_id;
15149                 if (eeprom_phy_serdes) {
15150                         if (!tg3_flag(tp, 5705_PLUS))
15151                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15152                         else
15153                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15154                 }
15155
15156                 if (tg3_flag(tp, 5750_PLUS))
15157                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15158                                     SHASTA_EXT_LED_MODE_MASK);
15159                 else
15160                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15161
15162                 switch (led_cfg) {
15163                 default:
15164                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15165                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15166                         break;
15167
15168                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15169                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15170                         break;
15171
15172                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15173                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15174
15175                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15176                          * read, as with some older 5700/5701 bootcode.
15177                          */
15178                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15179                             tg3_asic_rev(tp) == ASIC_REV_5701)
15180                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15181
15182                         break;
15183
15184                 case SHASTA_EXT_LED_SHARED:
15185                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15186                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15187                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15188                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15189                                                  LED_CTRL_MODE_PHY_2);
15190
15191                         if (tg3_flag(tp, 5717_PLUS) ||
15192                             tg3_asic_rev(tp) == ASIC_REV_5762)
15193                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15194                                                 LED_CTRL_BLINK_RATE_MASK;
15195
15196                         break;
15197
15198                 case SHASTA_EXT_LED_MAC:
15199                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15200                         break;
15201
15202                 case SHASTA_EXT_LED_COMBO:
15203                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15204                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15205                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15206                                                  LED_CTRL_MODE_PHY_2);
15207                         break;
15208
15209                 }
15210
15211                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15212                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15213                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15214                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15215
15216                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15217                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15218
15219                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15220                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15221                         if ((tp->pdev->subsystem_vendor ==
15222                              PCI_VENDOR_ID_ARIMA) &&
15223                             (tp->pdev->subsystem_device == 0x205a ||
15224                              tp->pdev->subsystem_device == 0x2063))
15225                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15226                 } else {
15227                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15228                         tg3_flag_set(tp, IS_NIC);
15229                 }
15230
15231                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15232                         tg3_flag_set(tp, ENABLE_ASF);
15233                         if (tg3_flag(tp, 5750_PLUS))
15234                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15235                 }
15236
15237                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15238                     tg3_flag(tp, 5750_PLUS))
15239                         tg3_flag_set(tp, ENABLE_APE);
15240
15241                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15242                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15243                         tg3_flag_clear(tp, WOL_CAP);
15244
15245                 if (tg3_flag(tp, WOL_CAP) &&
15246                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15247                         tg3_flag_set(tp, WOL_ENABLE);
15248                         device_set_wakeup_enable(&tp->pdev->dev, true);
15249                 }
15250
15251                 if (cfg2 & (1 << 17))
15252                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15253
15254                 /* SerDes signal pre-emphasis in register 0x590 is
15255                  * set by the bootcode if bit 18 is set. */
15256                 if (cfg2 & (1 << 18))
15257                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15258
15259                 if ((tg3_flag(tp, 57765_PLUS) ||
15260                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15261                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15262                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15263                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15264
15265                 if (tg3_flag(tp, PCI_EXPRESS)) {
15266                         u32 cfg3;
15267
15268                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15269                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15270                             !tg3_flag(tp, 57765_PLUS) &&
15271                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15272                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15273                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15274                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15275                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15276                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15277                 }
15278
15279                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15280                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15281                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15282                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15283                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15284                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15285
15286                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15287                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15288         }
15289 done:
15290         if (tg3_flag(tp, WOL_CAP))
15291                 device_set_wakeup_enable(&tp->pdev->dev,
15292                                          tg3_flag(tp, WOL_ENABLE));
15293         else
15294                 device_set_wakeup_capable(&tp->pdev->dev, false);
15295 }
15296
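      /* Worked example (hypothetical SRAM word) for the PHY ID merge in
       * tg3_get_eeprom_hw_cfg() above: with id1 = 0x12340000 and
       * id2 = 0x0000abcd,
       *   eeprom_phy_id = (0x1234 << 10)            ->  0x0048d000
       *                 | ((0xabcd & 0xfc00) << 16) ->  0xa8000000
       *                 | (0xabcd & 0x03ff)         ->  0x000003cd
       *                 = 0xa848d3cd,
       * matching the packing tg3 uses for PHY IDs read over MII.
       */
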
15297 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15298 {
15299         int i, err;
15300         u32 val2, off = offset * 8;
15301
15302         err = tg3_nvram_lock(tp);
15303         if (err)
15304                 return err;
15305
15306         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15307         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15308                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15309         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15310         udelay(10);
15311
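              /* Poll for up to 1 ms (100 iterations x 10 us) for the
               * command to complete.
               */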
15312         for (i = 0; i < 100; i++) {
15313                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15314                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15315                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15316                         break;
15317                 }
15318                 udelay(10);
15319         }
15320
15321         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15322
15323         tg3_nvram_unlock(tp);
15324         if (val2 & APE_OTP_STATUS_CMD_DONE)
15325                 return 0;
15326
15327         return -EBUSY;
15328 }
15329
15330 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15331 {
15332         int i;
15333         u32 val;
15334
15335         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15336         tw32(OTP_CTRL, cmd);
15337
15338         /* Wait for up to 1 ms for command to execute. */
15339         for (i = 0; i < 100; i++) {
15340                 val = tr32(OTP_STATUS);
15341                 if (val & OTP_STATUS_CMD_DONE)
15342                         break;
15343                 udelay(10);
15344         }
15345
15346         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15347 }
15348
15349 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15350  * configuration is a 32-bit value that straddles the alignment boundary.
15351  * We do two 32-bit reads and then shift and merge the results.
15352  */
15353 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15354 {
15355         u32 bhalf_otp, thalf_otp;
15356
15357         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15358
15359         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15360                 return 0;
15361
15362         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15363
15364         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15365                 return 0;
15366
15367         thalf_otp = tr32(OTP_READ_DATA);
15368
15369         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15370
15371         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15372                 return 0;
15373
15374         bhalf_otp = tr32(OTP_READ_DATA);
15375
15376         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15377 }
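
/* A hedged worked example of the merge above (hypothetical helper and
 * values, for illustration only): with thalf = 0x1234abcd and
 * bhalf = 0x5678ef00 the result is (0xabcd << 16) | 0x5678 = 0xabcd5678.
 */
static inline u32 tg3_otp_phycfg_merge_example(u32 thalf, u32 bhalf)
{
        /* Keep the low half-word of the top read and the high half-word
         * of the bottom read, exactly as tg3_read_otp_phycfg() does.
         */
        return ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);
}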
15378
15379 static void tg3_phy_init_link_config(struct tg3 *tp)
15380 {
15381         u32 adv = ADVERTISED_Autoneg;
15382
15383         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15384                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15385                         adv |= ADVERTISED_1000baseT_Half;
15386                 adv |= ADVERTISED_1000baseT_Full;
15387         }
15388
15389         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15390                 adv |= ADVERTISED_100baseT_Half |
15391                        ADVERTISED_100baseT_Full |
15392                        ADVERTISED_10baseT_Half |
15393                        ADVERTISED_10baseT_Full |
15394                        ADVERTISED_TP;
15395         else
15396                 adv |= ADVERTISED_FIBRE;
15397
15398         tp->link_config.advertising = adv;
15399         tp->link_config.speed = SPEED_UNKNOWN;
15400         tp->link_config.duplex = DUPLEX_UNKNOWN;
15401         tp->link_config.autoneg = AUTONEG_ENABLE;
15402         tp->link_config.active_speed = SPEED_UNKNOWN;
15403         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15404
15405         tp->old_link = -1;
15406 }
15407
15408 static int tg3_phy_probe(struct tg3 *tp)
15409 {
15410         u32 hw_phy_id_1, hw_phy_id_2;
15411         u32 hw_phy_id, hw_phy_id_masked;
15412         int err;
15413
15414         /* flow control autonegotiation is default behavior */
15415         tg3_flag_set(tp, PAUSE_AUTONEG);
15416         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15417
15418         if (tg3_flag(tp, ENABLE_APE)) {
15419                 switch (tp->pci_fn) {
15420                 case 0:
15421                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15422                         break;
15423                 case 1:
15424                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15425                         break;
15426                 case 2:
15427                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15428                         break;
15429                 case 3:
15430                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15431                         break;
15432                 }
15433         }
15434
15435         if (!tg3_flag(tp, ENABLE_ASF) &&
15436             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15437             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15438                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15439                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15440
15441         if (tg3_flag(tp, USE_PHYLIB))
15442                 return tg3_phy_init(tp);
15443
15444         /* Reading the PHY ID register can conflict with ASF
15445          * firmware access to the PHY hardware.
15446          */
15447         err = 0;
15448         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15449                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15450         } else {
15451                 /* Now read the physical PHY_ID from the chip and verify
15452                  * that it is sane.  If it doesn't look good, we fall back
15453                  * to the PHY_ID found in the eeprom area and, failing
15454                  * that, to the hard-coded subsys device table.
15455                  */
15456                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15457                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15458
15459                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15460                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15461                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15462
15463                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
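
                /* Worked example (hypothetical register values): PHYSID1 =
                 * 0x0143 and PHYSID2 = 0xbca0 would assemble to
                 * (0x0143 << 10) | ((0xbca0 & 0xfc00) << 16) | (0xbca0 & 0x03ff),
                 * i.e. 0xbc050ca0, matching the 0xbc05xxxx IDs noted below.
                 */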
15464         }
15465
15466         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15467                 tp->phy_id = hw_phy_id;
15468                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15469                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15470                 else
15471                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15472         } else {
15473                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15474                         /* Do nothing, phy ID already set up in
15475                          * tg3_get_eeprom_hw_cfg().
15476                          */
15477                 } else {
15478                         struct subsys_tbl_ent *p;
15479
15480                         /* No eeprom signature?  Try the hardcoded
15481                          * subsys device table.
15482                          */
15483                         p = tg3_lookup_by_subsys(tp);
15484                         if (p) {
15485                                 tp->phy_id = p->phy_id;
15486                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15487                                 /* So far we have seen the IDs 0xbc050cd0,
15488                                  * 0xbc050f80 and 0xbc050c30 on devices
15489                                  * connected to a BCM4785, and there are
15490                                  * probably more. For now, just assume that
15491                                  * the phy is supported when it is connected
15492                                  * to an SSB core.
15493                                  */
15494                                 return -ENODEV;
15495                         }
15496
15497                         if (!tp->phy_id ||
15498                             tp->phy_id == TG3_PHY_ID_BCM8002)
15499                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15500                 }
15501         }
15502
15503         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15504             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15505              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15506              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15507              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15508              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15509               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15510              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15511               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15512                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15513
15514                 tp->eee.supported = SUPPORTED_100baseT_Full |
15515                                     SUPPORTED_1000baseT_Full;
15516                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15517                                      ADVERTISED_1000baseT_Full;
15518                 tp->eee.eee_enabled = 1;
15519                 tp->eee.tx_lpi_enabled = 1;
15520                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15521         }
15522
15523         tg3_phy_init_link_config(tp);
15524
15525         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15526             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15527             !tg3_flag(tp, ENABLE_APE) &&
15528             !tg3_flag(tp, ENABLE_ASF)) {
15529                 u32 bmsr, dummy;
15530
15531                 tg3_readphy(tp, MII_BMSR, &bmsr);
15532                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15533                     (bmsr & BMSR_LSTATUS))
15534                         goto skip_phy_reset;
15535
15536                 err = tg3_phy_reset(tp);
15537                 if (err)
15538                         return err;
15539
15540                 tg3_phy_set_wirespeed(tp);
15541
15542                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15543                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15544                                             tp->link_config.flowctrl);
15545
15546                         tg3_writephy(tp, MII_BMCR,
15547                                      BMCR_ANENABLE | BMCR_ANRESTART);
15548                 }
15549         }
15550
15551 skip_phy_reset:
15552         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15553                 err = tg3_init_5401phy_dsp(tp);
15554                 if (err)
15555                         return err;
15556
15557                 err = tg3_init_5401phy_dsp(tp);
15558         }
15559
15560         return err;
15561 }
15562
15563 static void tg3_read_vpd(struct tg3 *tp)
15564 {
15565         u8 *vpd_data;
15566         unsigned int block_end, rosize, len;
15567         u32 vpdlen;
15568         int j, i = 0;
15569
15570         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15571         if (!vpd_data)
15572                 goto out_no_vpd;
15573
15574         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15575         if (i < 0)
15576                 goto out_not_found;
15577
15578         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15579         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15580         i += PCI_VPD_LRDT_TAG_SIZE;
15581
15582         if (block_end > vpdlen)
15583                 goto out_not_found;
15584
15585         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15586                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15587         if (j > 0) {
15588                 len = pci_vpd_info_field_size(&vpd_data[j]);
15589
15590                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
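
                /* The MFR_ID field is matched against "1028", Dell's PCI
                 * vendor ID (0x1028) rendered in ASCII; only such boards
                 * carry the VENDOR0 firmware-version keyword read next.
                 */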
15591                 if (j + len > block_end || len != 4 ||
15592                     memcmp(&vpd_data[j], "1028", 4))
15593                         goto partno;
15594
15595                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15596                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15597                 if (j < 0)
15598                         goto partno;
15599
15600                 len = pci_vpd_info_field_size(&vpd_data[j]);
15601
15602                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15603                 if (j + len > block_end)
15604                         goto partno;
15605
15606                 if (len >= sizeof(tp->fw_ver))
15607                         len = sizeof(tp->fw_ver) - 1;
15608                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15609                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15610                          &vpd_data[j]);
15611         }
15612
15613 partno:
15614         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15615                                       PCI_VPD_RO_KEYWORD_PARTNO);
15616         if (i < 0)
15617                 goto out_not_found;
15618
15619         len = pci_vpd_info_field_size(&vpd_data[i]);
15620
15621         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15622         if (len > TG3_BPN_SIZE ||
15623             (len + i) > vpdlen)
15624                 goto out_not_found;
15625
15626         memcpy(tp->board_part_number, &vpd_data[i], len);
15627
15628 out_not_found:
15629         kfree(vpd_data);
15630         if (tp->board_part_number[0])
15631                 return;
15632
15633 out_no_vpd:
15634         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15635                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15636                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15637                         strcpy(tp->board_part_number, "BCM5717");
15638                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15639                         strcpy(tp->board_part_number, "BCM5718");
15640                 else
15641                         goto nomatch;
15642         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15643                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15644                         strcpy(tp->board_part_number, "BCM57780");
15645                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15646                         strcpy(tp->board_part_number, "BCM57760");
15647                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15648                         strcpy(tp->board_part_number, "BCM57790");
15649                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15650                         strcpy(tp->board_part_number, "BCM57788");
15651                 else
15652                         goto nomatch;
15653         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15654                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15655                         strcpy(tp->board_part_number, "BCM57761");
15656                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15657                         strcpy(tp->board_part_number, "BCM57765");
15658                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15659                         strcpy(tp->board_part_number, "BCM57781");
15660                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15661                         strcpy(tp->board_part_number, "BCM57785");
15662                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15663                         strcpy(tp->board_part_number, "BCM57791");
15664                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15665                         strcpy(tp->board_part_number, "BCM57795");
15666                 else
15667                         goto nomatch;
15668         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15669                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15670                         strcpy(tp->board_part_number, "BCM57762");
15671                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15672                         strcpy(tp->board_part_number, "BCM57766");
15673                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15674                         strcpy(tp->board_part_number, "BCM57782");
15675                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15676                         strcpy(tp->board_part_number, "BCM57786");
15677                 else
15678                         goto nomatch;
15679         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15680                 strcpy(tp->board_part_number, "BCM95906");
15681         } else {
15682 nomatch:
15683                 strcpy(tp->board_part_number, "none");
15684         }
15685 }
15686
15687 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15688 {
15689         u32 val;
15690
15691         if (tg3_nvram_read(tp, offset, &val) ||
15692             (val & 0xfc000000) != 0x0c000000 ||
15693             tg3_nvram_read(tp, offset + 4, &val) ||
15694             val != 0)
15695                 return 0;
15696
15697         return 1;
15698 }
15699
15700 static void tg3_read_bc_ver(struct tg3 *tp)
15701 {
15702         u32 val, offset, start, ver_offset;
15703         int i, dst_off;
15704         bool newver = false;
15705
15706         if (tg3_nvram_read(tp, 0xc, &offset) ||
15707             tg3_nvram_read(tp, 0x4, &start))
15708                 return;
15709
15710         offset = tg3_nvram_logical_addr(tp, offset);
15711
15712         if (tg3_nvram_read(tp, offset, &val))
15713                 return;
15714
15715         if ((val & 0xfc000000) == 0x0c000000) {
15716                 if (tg3_nvram_read(tp, offset + 4, &val))
15717                         return;
15718
15719                 if (val == 0)
15720                         newver = true;
15721         }
15722
15723         dst_off = strlen(tp->fw_ver);
15724
15725         if (newver) {
15726                 if (TG3_VER_SIZE - dst_off < 16 ||
15727                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15728                         return;
15729
15730                 offset = offset + ver_offset - start;
15731                 for (i = 0; i < 16; i += 4) {
15732                         __be32 v;
15733                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15734                                 return;
15735
15736                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15737                 }
15738         } else {
15739                 u32 major, minor;
15740
15741                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15742                         return;
15743
15744                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15745                         TG3_NVM_BCVER_MAJSFT;
15746                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15747                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15748                          "v%d.%02d", major, minor);
15749         }
15750 }
15751
15752 static void tg3_read_hwsb_ver(struct tg3 *tp)
15753 {
15754         u32 val, major, minor;
15755
15756         /* Use native endian representation */
15757         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15758                 return;
15759
15760         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15761                 TG3_NVM_HWSB_CFG1_MAJSFT;
15762         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15763                 TG3_NVM_HWSB_CFG1_MINSFT;
15764
15765         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15766 }
15767
15768 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15769 {
15770         u32 offset, major, minor, build;
15771
15772         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15773
15774         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15775                 return;
15776
15777         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15778         case TG3_EEPROM_SB_REVISION_0:
15779                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15780                 break;
15781         case TG3_EEPROM_SB_REVISION_2:
15782                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15783                 break;
15784         case TG3_EEPROM_SB_REVISION_3:
15785                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15786                 break;
15787         case TG3_EEPROM_SB_REVISION_4:
15788                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15789                 break;
15790         case TG3_EEPROM_SB_REVISION_5:
15791                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15792                 break;
15793         case TG3_EEPROM_SB_REVISION_6:
15794                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15795                 break;
15796         default:
15797                 return;
15798         }
15799
15800         if (tg3_nvram_read(tp, offset, &val))
15801                 return;
15802
15803         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15804                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15805         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15806                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15807         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15808
15809         if (minor > 99 || build > 26)
15810                 return;
15811
15812         offset = strlen(tp->fw_ver);
15813         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15814                  " v%d.%02d", major, minor);
15815
15816         if (build > 0) {
15817                 offset = strlen(tp->fw_ver);
15818                 if (offset < TG3_VER_SIZE - 1)
15819                         tp->fw_ver[offset] = 'a' + build - 1;
15820         }
15821 }
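
/* Worked example (hypothetical values): starting from an empty tp->fw_ver,
 * major = 1, minor = 2 and build = 3 would yield " v1.02" plus the letter
 * 'c', i.e. "sb v1.02c".
 */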
15822
15823 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15824 {
15825         u32 val, offset, start;
15826         int i, vlen;
15827
15828         for (offset = TG3_NVM_DIR_START;
15829              offset < TG3_NVM_DIR_END;
15830              offset += TG3_NVM_DIRENT_SIZE) {
15831                 if (tg3_nvram_read(tp, offset, &val))
15832                         return;
15833
15834                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15835                         break;
15836         }
15837
15838         if (offset == TG3_NVM_DIR_END)
15839                 return;
15840
15841         if (!tg3_flag(tp, 5705_PLUS))
15842                 start = 0x08000000;
15843         else if (tg3_nvram_read(tp, offset - 4, &start))
15844                 return;
15845
15846         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15847             !tg3_fw_img_is_valid(tp, offset) ||
15848             tg3_nvram_read(tp, offset + 8, &val))
15849                 return;
15850
15851         offset += val - start;
15852
15853         vlen = strlen(tp->fw_ver);
15854
15855         tp->fw_ver[vlen++] = ',';
15856         tp->fw_ver[vlen++] = ' ';
15857
15858         for (i = 0; i < 4; i++) {
15859                 __be32 v;
15860                 if (tg3_nvram_read_be32(tp, offset, &v))
15861                         return;
15862
15863                 offset += sizeof(v);
15864
15865                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15866                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15867                         break;
15868                 }
15869
15870                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15871                 vlen += sizeof(v);
15872         }
15873 }
15874
15875 static void tg3_probe_ncsi(struct tg3 *tp)
15876 {
15877         u32 apedata;
15878
15879         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15880         if (apedata != APE_SEG_SIG_MAGIC)
15881                 return;
15882
15883         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15884         if (!(apedata & APE_FW_STATUS_READY))
15885                 return;
15886
15887         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15888                 tg3_flag_set(tp, APE_HAS_NCSI);
15889 }
15890
15891 static void tg3_read_dash_ver(struct tg3 *tp)
15892 {
15893         int vlen;
15894         u32 apedata;
15895         char *fwtype;
15896
15897         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15898
15899         if (tg3_flag(tp, APE_HAS_NCSI))
15900                 fwtype = "NCSI";
15901         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15902                 fwtype = "SMASH";
15903         else
15904                 fwtype = "DASH";
15905
15906         vlen = strlen(tp->fw_ver);
15907
15908         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15909                  fwtype,
15910                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15911                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15912                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15913                  (apedata & APE_FW_VERSION_BLDMSK));
15914 }
15915
15916 static void tg3_read_otp_ver(struct tg3 *tp)
15917 {
15918         u32 val, val2;
15919
15920         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15921                 return;
15922
15923         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15924             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15925             TG3_OTP_MAGIC0_VALID(val)) {
15926                 u64 val64 = (u64) val << 32 | val2;
15927                 u32 ver = 0;
15928                 int i, vlen;
15929
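                /* Scan the low seven bytes of the combined 64-bit OTP word
                 * and keep the last non-zero byte as the version number.
                 */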
15930                 for (i = 0; i < 7; i++) {
15931                         if ((val64 & 0xff) == 0)
15932                                 break;
15933                         ver = val64 & 0xff;
15934                         val64 >>= 8;
15935                 }
15936                 vlen = strlen(tp->fw_ver);
15937                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15938         }
15939 }
15940
15941 static void tg3_read_fw_ver(struct tg3 *tp)
15942 {
15943         u32 val;
15944         bool vpd_vers = false;
15945
15946         if (tp->fw_ver[0] != 0)
15947                 vpd_vers = true;
15948
15949         if (tg3_flag(tp, NO_NVRAM)) {
15950                 strcat(tp->fw_ver, "sb");
15951                 tg3_read_otp_ver(tp);
15952                 return;
15953         }
15954
15955         if (tg3_nvram_read(tp, 0, &val))
15956                 return;
15957
15958         if (val == TG3_EEPROM_MAGIC)
15959                 tg3_read_bc_ver(tp);
15960         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15961                 tg3_read_sb_ver(tp, val);
15962         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15963                 tg3_read_hwsb_ver(tp);
15964
15965         if (tg3_flag(tp, ENABLE_ASF)) {
15966                 if (tg3_flag(tp, ENABLE_APE)) {
15967                         tg3_probe_ncsi(tp);
15968                         if (!vpd_vers)
15969                                 tg3_read_dash_ver(tp);
15970                 } else if (!vpd_vers) {
15971                         tg3_read_mgmtfw_ver(tp);
15972                 }
15973         }
15974
15975         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15976 }
15977
15978 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15979 {
15980         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15981                 return TG3_RX_RET_MAX_SIZE_5717;
15982         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15983                 return TG3_RX_RET_MAX_SIZE_5700;
15984         else
15985                 return TG3_RX_RET_MAX_SIZE_5705;
15986 }
15987
15988 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15989         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15990         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15991         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15992         { },
15993 };
15994
15995 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15996 {
15997         struct pci_dev *peer;
15998         unsigned int func, devnr = tp->pdev->devfn & ~7;
15999
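        /* devfn & ~7 masks off the three function bits, yielding function 0
         * of this slot; the loop below then probes all eight functions for
         * the other port of a dual-port device.
         */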
16000         for (func = 0; func < 8; func++) {
16001                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16002                 if (peer && peer != tp->pdev)
16003                         break;
16004                 pci_dev_put(peer);
16005         }
16006         /* The 5704 can be configured in single-port mode; set peer
16007          * to tp->pdev in that case.
16008          */
16009         if (!peer) {
16010                 peer = tp->pdev;
16011                 return peer;
16012         }
16013
16014         /*
16015          * We don't need to keep the refcount elevated; there's no way
16016          * to remove one half of this device without removing the other.
16017          */
16018         pci_dev_put(peer);
16019
16020         return peer;
16021 }
16022
16023 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16024 {
16025         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16026         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16027                 u32 reg;
16028
16029                 /* All devices that use the alternate
16030                  * ASIC REV location have a CPMU.
16031                  */
16032                 tg3_flag_set(tp, CPMU_PRESENT);
16033
16034                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16035                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16036                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16037                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16038                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16039                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16040                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16041                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16042                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16043                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16044                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16045                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16046                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16047                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16048                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16049                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16050                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16051                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16052                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16053                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16054                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16055                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16056                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16057                 else
16058                         reg = TG3PCI_PRODID_ASICREV;
16059
16060                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16061         }
16062
16063         /* Wrong chip ID in 5752 A0. This code can be removed later
16064          * as A0 is not in production.
16065          */
16066         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16067                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16068
16069         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16070                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16071
16072         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16073             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16074             tg3_asic_rev(tp) == ASIC_REV_5720)
16075                 tg3_flag_set(tp, 5717_PLUS);
16076
16077         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16078             tg3_asic_rev(tp) == ASIC_REV_57766)
16079                 tg3_flag_set(tp, 57765_CLASS);
16080
16081         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16082              tg3_asic_rev(tp) == ASIC_REV_5762)
16083                 tg3_flag_set(tp, 57765_PLUS);
16084
16085         /* Intentionally exclude ASIC_REV_5906 */
16086         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16087             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16088             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16089             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16090             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16091             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16092             tg3_flag(tp, 57765_PLUS))
16093                 tg3_flag_set(tp, 5755_PLUS);
16094
16095         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16096             tg3_asic_rev(tp) == ASIC_REV_5714)
16097                 tg3_flag_set(tp, 5780_CLASS);
16098
16099         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16100             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16101             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16102             tg3_flag(tp, 5755_PLUS) ||
16103             tg3_flag(tp, 5780_CLASS))
16104                 tg3_flag_set(tp, 5750_PLUS);
16105
16106         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16107             tg3_flag(tp, 5750_PLUS))
16108                 tg3_flag_set(tp, 5705_PLUS);
16109 }
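
/* Note the cumulative flag hierarchy built above: 5717_PLUS and 57765_CLASS
 * feed 57765_PLUS, which feeds 5755_PLUS, which (together with 5780_CLASS
 * and a few standalone revs) feeds 5750_PLUS, which in turn feeds 5705_PLUS.
 * Later code tests the broadest flag that applies.
 */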
16110
16111 static bool tg3_10_100_only_device(struct tg3 *tp,
16112                                    const struct pci_device_id *ent)
16113 {
16114         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16115
16116         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16117              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16118             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16119                 return true;
16120
16121         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16122                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16123                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16124                                 return true;
16125                 } else {
16126                         return true;
16127                 }
16128         }
16129
16130         return false;
16131 }
16132
16133 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16134 {
16135         u32 misc_ctrl_reg;
16136         u32 pci_state_reg, grc_misc_cfg;
16137         u32 val;
16138         u16 pci_cmd;
16139         int err;
16140
16141         /* Force memory write invalidate off.  If we leave it on,
16142          * then on 5700_BX chips we have to enable a workaround.
16143          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16144          * to match the cacheline size.  The Broadcom driver has this
16145          * workaround but turns MWI off all the time, so it never uses
16146          * it.  This seems to suggest that the workaround is insufficient.
16147          */
16148         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16149         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16150         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16151
16152         /* Important! -- Make sure register accesses are byteswapped
16153          * correctly.  Also, for those chips that require it, make
16154          * sure that indirect register accesses are enabled before
16155          * the first operation.
16156          */
16157         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16158                               &misc_ctrl_reg);
16159         tp->misc_host_ctrl |= (misc_ctrl_reg &
16160                                MISC_HOST_CTRL_CHIPREV);
16161         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16162                                tp->misc_host_ctrl);
16163
16164         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16165
16166         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16167          * we need to disable memory and use config. cycles
16168          * only to access all registers. The 5702/03 chips
16169          * can mistakenly decode the special cycles from the
16170          * ICH chipsets as memory write cycles, causing corruption
16171          * of register and memory space. Only certain ICH bridges
16172          * will drive special cycles with non-zero data during the
16173          * address phase which can fall within the 5703's address
16174          * range. This is not an ICH bug as the PCI spec allows
16175          * non-zero address during special cycles. However, only
16176          * these ICH bridges are known to drive non-zero addresses
16177          * during special cycles.
16178          *
16179          * Since special cycles do not cross PCI bridges, we only
16180          * enable this workaround if the 5703 is on the secondary
16181          * bus of these ICH bridges.
16182          */
16183         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16184             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16185                 static struct tg3_dev_id {
16186                         u32     vendor;
16187                         u32     device;
16188                         u32     rev;
16189                 } ich_chipsets[] = {
16190                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16191                           PCI_ANY_ID },
16192                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16193                           PCI_ANY_ID },
16194                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16195                           0xa },
16196                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16197                           PCI_ANY_ID },
16198                         { },
16199                 };
16200                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16201                 struct pci_dev *bridge = NULL;
16202
16203                 while (pci_id->vendor != 0) {
16204                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16205                                                 bridge);
16206                         if (!bridge) {
16207                                 pci_id++;
16208                                 continue;
16209                         }
16210                         if (pci_id->rev != PCI_ANY_ID) {
16211                                 if (bridge->revision > pci_id->rev)
16212                                         continue;
16213                         }
16214                         if (bridge->subordinate &&
16215                             (bridge->subordinate->number ==
16216                              tp->pdev->bus->number)) {
16217                                 tg3_flag_set(tp, ICH_WORKAROUND);
16218                                 pci_dev_put(bridge);
16219                                 break;
16220                         }
16221                 }
16222         }
16223
16224         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16225                 static struct tg3_dev_id {
16226                         u32     vendor;
16227                         u32     device;
16228                 } bridge_chipsets[] = {
16229                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16230                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16231                         { },
16232                 };
16233                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16234                 struct pci_dev *bridge = NULL;
16235
16236                 while (pci_id->vendor != 0) {
16237                         bridge = pci_get_device(pci_id->vendor,
16238                                                 pci_id->device,
16239                                                 bridge);
16240                         if (!bridge) {
16241                                 pci_id++;
16242                                 continue;
16243                         }
16244                         if (bridge->subordinate &&
16245                             (bridge->subordinate->number <=
16246                              tp->pdev->bus->number) &&
16247                             (bridge->subordinate->busn_res.end >=
16248                              tp->pdev->bus->number)) {
16249                                 tg3_flag_set(tp, 5701_DMA_BUG);
16250                                 pci_dev_put(bridge);
16251                                 break;
16252                         }
16253                 }
16254         }
16255
16256         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16257          * DMA addresses > 40-bit. This bridge may have additional
16258          * 57xx devices behind it in some 4-port NIC designs, for example.
16259          * Any tg3 device found behind the bridge will also need the 40-bit
16260          * DMA workaround.
16261          */
16262         if (tg3_flag(tp, 5780_CLASS)) {
16263                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16264                 tp->msi_cap = tp->pdev->msi_cap;
16265         } else {
16266                 struct pci_dev *bridge = NULL;
16267
16268                 do {
16269                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16270                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16271                                                 bridge);
16272                         if (bridge && bridge->subordinate &&
16273                             (bridge->subordinate->number <=
16274                              tp->pdev->bus->number) &&
16275                             (bridge->subordinate->busn_res.end >=
16276                              tp->pdev->bus->number)) {
16277                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16278                                 pci_dev_put(bridge);
16279                                 break;
16280                         }
16281                 } while (bridge);
16282         }
16283
16284         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16285             tg3_asic_rev(tp) == ASIC_REV_5714)
16286                 tp->pdev_peer = tg3_find_peer(tp);
16287
16288         /* Determine TSO capabilities */
16289         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16290                 ; /* Do nothing. HW bug. */
16291         else if (tg3_flag(tp, 57765_PLUS))
16292                 tg3_flag_set(tp, HW_TSO_3);
16293         else if (tg3_flag(tp, 5755_PLUS) ||
16294                  tg3_asic_rev(tp) == ASIC_REV_5906)
16295                 tg3_flag_set(tp, HW_TSO_2);
16296         else if (tg3_flag(tp, 5750_PLUS)) {
16297                 tg3_flag_set(tp, HW_TSO_1);
16298                 tg3_flag_set(tp, TSO_BUG);
16299                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16300                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16301                         tg3_flag_clear(tp, TSO_BUG);
16302         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16303                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16304                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16305                 tg3_flag_set(tp, FW_TSO);
16306                 tg3_flag_set(tp, TSO_BUG);
16307                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16308                         tp->fw_needed = FIRMWARE_TG3TSO5;
16309                 else
16310                         tp->fw_needed = FIRMWARE_TG3TSO;
16311         }
16312
16313         /* Selectively allow TSO based on operating conditions */
16314         if (tg3_flag(tp, HW_TSO_1) ||
16315             tg3_flag(tp, HW_TSO_2) ||
16316             tg3_flag(tp, HW_TSO_3) ||
16317             tg3_flag(tp, FW_TSO)) {
16318                 /* For firmware TSO, assume ASF is disabled.
16319                  * We'll disable TSO later if we discover ASF
16320                  * is enabled in tg3_get_eeprom_hw_cfg().
16321                  */
16322                 tg3_flag_set(tp, TSO_CAPABLE);
16323         } else {
16324                 tg3_flag_clear(tp, TSO_CAPABLE);
16325                 tg3_flag_clear(tp, TSO_BUG);
16326                 tp->fw_needed = NULL;
16327         }
16328
16329         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16330                 tp->fw_needed = FIRMWARE_TG3;
16331
16332         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16333                 tp->fw_needed = FIRMWARE_TG357766;
16334
16335         tp->irq_max = 1;
16336
16337         if (tg3_flag(tp, 5750_PLUS)) {
16338                 tg3_flag_set(tp, SUPPORT_MSI);
16339                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16340                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16341                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16342                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16343                      tp->pdev_peer == tp->pdev))
16344                         tg3_flag_clear(tp, SUPPORT_MSI);
16345
16346                 if (tg3_flag(tp, 5755_PLUS) ||
16347                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16348                         tg3_flag_set(tp, 1SHOT_MSI);
16349                 }
16350
16351                 if (tg3_flag(tp, 57765_PLUS)) {
16352                         tg3_flag_set(tp, SUPPORT_MSIX);
16353                         tp->irq_max = TG3_IRQ_MAX_VECS;
16354                 }
16355         }
16356
16357         tp->txq_max = 1;
16358         tp->rxq_max = 1;
16359         if (tp->irq_max > 1) {
16360                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16361                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16362
16363                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16364                     tg3_asic_rev(tp) == ASIC_REV_5720)
16365                         tp->txq_max = tp->irq_max - 1;
16366         }
16367
16368         if (tg3_flag(tp, 5755_PLUS) ||
16369             tg3_asic_rev(tp) == ASIC_REV_5906)
16370                 tg3_flag_set(tp, SHORT_DMA_BUG);
16371
16372         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16373                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16374
16375         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16376             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16377             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16378             tg3_asic_rev(tp) == ASIC_REV_5762)
16379                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16380
16381         if (tg3_flag(tp, 57765_PLUS) &&
16382             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16383                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16384
16385         if (!tg3_flag(tp, 5705_PLUS) ||
16386             tg3_flag(tp, 5780_CLASS) ||
16387             tg3_flag(tp, USE_JUMBO_BDFLAG))
16388                 tg3_flag_set(tp, JUMBO_CAPABLE);
16389
16390         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16391                               &pci_state_reg);
16392
16393         if (pci_is_pcie(tp->pdev)) {
16394                 u16 lnkctl;
16395
16396                 tg3_flag_set(tp, PCI_EXPRESS);
16397
16398                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16399                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16400                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16401                                 tg3_flag_clear(tp, HW_TSO_2);
16402                                 tg3_flag_clear(tp, TSO_CAPABLE);
16403                         }
16404                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16405                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16406                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16407                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16408                                 tg3_flag_set(tp, CLKREQ_BUG);
16409                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16410                         tg3_flag_set(tp, L1PLLPD_EN);
16411                 }
16412         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16413                 /* BCM5785 devices are effectively PCIe devices, and should
16414                  * follow PCIe codepaths, but do not have a PCIe capabilities
16415                  * section.
16416                  */
16417                 tg3_flag_set(tp, PCI_EXPRESS);
16418         } else if (!tg3_flag(tp, 5705_PLUS) ||
16419                    tg3_flag(tp, 5780_CLASS)) {
16420                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16421                 if (!tp->pcix_cap) {
16422                         dev_err(&tp->pdev->dev,
16423                                 "Cannot find PCI-X capability, aborting\n");
16424                         return -EIO;
16425                 }
16426
16427                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16428                         tg3_flag_set(tp, PCIX_MODE);
16429         }
16430
16431         /* If we have an AMD 762 or VIA K8T800 chipset, write
16432          * reordering to the mailbox registers done by the host
16433          * controller can cause major trouble.  We read back from
16434          * every mailbox register write to force the writes to be
16435          * posted to the chip in order.
16436          */
16437         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16438             !tg3_flag(tp, PCI_EXPRESS))
16439                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
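
        /* Illustrative sketch of that read-back pattern (editor's note, not
         * driver code):
         *
         *      tw32(off, val);         posted write may be reordered
         *      (void) tr32(off);       read-back drains it to the chip
         *
         * tg3_write_flush_reg32() applies this pattern; see its uses below.
         */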
16440
16441         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16442                              &tp->pci_cacheline_sz);
16443         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16444                              &tp->pci_lat_timer);
16445         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16446             tp->pci_lat_timer < 64) {
16447                 tp->pci_lat_timer = 64;
16448                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16449                                       tp->pci_lat_timer);
16450         }
16451
16452         /* Important! -- It is critical that the PCI-X hw workaround
16453          * situation is decided before the first MMIO register access.
16454          */
16455         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16456                 /* 5700 BX chips need to have their TX producer index
16457                  * mailboxes written twice to work around a bug.
16458                  */
16459                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16460
16461                 /* If we are in PCI-X mode, enable the register write workaround.
16462                  *
16463                  * The workaround is to use indirect register accesses
16464                  * for all chip writes except those to mailbox registers.
16465                  */
16466                 if (tg3_flag(tp, PCIX_MODE)) {
16467                         u32 pm_reg;
16468
16469                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16470
16471                         /* The chip can have its power management PCI config
16472                          * space registers clobbered due to this bug.
16473                          * So explicitly force the chip into D0 here.
16474                          */
16475                         pci_read_config_dword(tp->pdev,
16476                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16477                                               &pm_reg);
16478                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16479                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16480                         pci_write_config_dword(tp->pdev,
16481                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16482                                                pm_reg);
16483
16484                         /* Also, force SERR#/PERR# in PCI command. */
16485                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16486                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16487                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16488                 }
16489         }
16490
16491         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16492                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16493         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16494                 tg3_flag_set(tp, PCI_32BIT);
16495
16496         /* Chip-specific fixup from Broadcom driver */
16497         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16498             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16499                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16500                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16501         }
16502
16503         /* Default fast path register access methods */
16504         tp->read32 = tg3_read32;
16505         tp->write32 = tg3_write32;
16506         tp->read32_mbox = tg3_read32;
16507         tp->write32_mbox = tg3_write32;
16508         tp->write32_tx_mbox = tg3_write32;
16509         tp->write32_rx_mbox = tg3_write32;
16510
16511         /* Various workaround register access methods */
16512         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16513                 tp->write32 = tg3_write_indirect_reg32;
16514         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16515                  (tg3_flag(tp, PCI_EXPRESS) &&
16516                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16517                 /*
16518                  * Back to back register writes can cause problems on these
16519                  * chips, the workaround is to read back all reg writes
16520                  * except those to mailbox regs.
16521                  *
16522                  * See tg3_write_indirect_reg32().
16523                  */
16524                 tp->write32 = tg3_write_flush_reg32;
16525         }
16526
16527         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16528                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16529                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16530                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16531         }
16532
16533         if (tg3_flag(tp, ICH_WORKAROUND)) {
16534                 tp->read32 = tg3_read_indirect_reg32;
16535                 tp->write32 = tg3_write_indirect_reg32;
16536                 tp->read32_mbox = tg3_read_indirect_mbox;
16537                 tp->write32_mbox = tg3_write_indirect_mbox;
16538                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16539                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16540
16541                 iounmap(tp->regs);
16542                 tp->regs = NULL;
16543
16544                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16545                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16546                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16547         }
16548         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16549                 tp->read32_mbox = tg3_read32_mbox_5906;
16550                 tp->write32_mbox = tg3_write32_mbox_5906;
16551                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16552                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16553         }
16554
16555         if (tp->write32 == tg3_write_indirect_reg32 ||
16556             (tg3_flag(tp, PCIX_MODE) &&
16557              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16558               tg3_asic_rev(tp) == ASIC_REV_5701)))
16559                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16560
16561         /* The memory arbiter has to be enabled in order for SRAM accesses
16562          * to succeed.  Normally on powerup the tg3 chip firmware will make
16563          * sure it is enabled, but other entities such as system netboot
16564          * code might disable it.
16565          */
16566         val = tr32(MEMARB_MODE);
16567         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16568
16569         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16570         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16571             tg3_flag(tp, 5780_CLASS)) {
16572                 if (tg3_flag(tp, PCIX_MODE)) {
16573                         pci_read_config_dword(tp->pdev,
16574                                               tp->pcix_cap + PCI_X_STATUS,
16575                                               &val);
16576                         tp->pci_fn = val & 0x7;
16577                 }
16578         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16579                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16580                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16581                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16582                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16583                         val = tr32(TG3_CPMU_STATUS);
16584
16585                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16586                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16587                 else
16588                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16589                                      TG3_CPMU_STATUS_FSHFT_5719;
16590         }
16591
16592         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16593                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16594                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16595         }
16596
16597         /* Get eeprom hw config before calling tg3_set_power_state().
16598          * In particular, the TG3_FLAG_IS_NIC flag must be
16599          * determined before calling tg3_set_power_state() so that
16600          * we know whether or not to switch out of Vaux power.
16601          * When the flag is set, it means that GPIO1 is used for eeprom
16602          * write protect and also implies that it is a LOM where GPIOs
16603          * are not used to switch power.
16604          */
16605         tg3_get_eeprom_hw_cfg(tp);
16606
16607         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16608                 tg3_flag_clear(tp, TSO_CAPABLE);
16609                 tg3_flag_clear(tp, TSO_BUG);
16610                 tp->fw_needed = NULL;
16611         }
16612
16613         if (tg3_flag(tp, ENABLE_APE)) {
16614                 /* Allow reads and writes to the
16615                  * APE register and memory space.
16616                  */
16617                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16618                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16619                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16620                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16621                                        pci_state_reg);
16622
16623                 tg3_ape_lock_init(tp);
16624         }
16625
16626         /* Set up tp->grc_local_ctrl before calling
16627          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16628          * will bring 5700's external PHY out of reset.
16629          * It is also used as eeprom write protect on LOMs.
16630          */
16631         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16632         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16633             tg3_flag(tp, EEPROM_WRITE_PROT))
16634                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16635                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16636         /* Unused GPIO3 must be driven as output on 5752 because there
16637          * are no pull-up resistors on unused GPIO pins.
16638          */
16639         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16640                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16641
16642         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16643             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16644             tg3_flag(tp, 57765_CLASS))
16645                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16646
16647         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16648             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16649                 /* Turn off the debug UART. */
16650                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16651                 if (tg3_flag(tp, IS_NIC))
16652                         /* Keep VMain power. */
16653                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16654                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16655         }
16656
16657         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16658                 tp->grc_local_ctrl |=
16659                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16660
16661         /* Switch out of Vaux if it is a NIC */
16662         tg3_pwrsrc_switch_to_vmain(tp);
16663
16664         /* Derive initial jumbo mode from MTU assigned in
16665          * ether_setup() via the alloc_etherdev() call
16666          */
16667         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16668                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16669
16670         /* Determine WakeOnLan speed to use. */
16671         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16672             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16673             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16674             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16675                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16676         } else {
16677                 tg3_flag_set(tp, WOL_SPEED_100MB);
16678         }
16679
16680         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16681                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16682
16683         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16684         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16685             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16686              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16687              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16688             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16689             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16690                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16691
16692         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16693             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16694                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16695         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16696                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16697
16698         if (tg3_flag(tp, 5705_PLUS) &&
16699             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16700             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16701             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16702             !tg3_flag(tp, 57765_PLUS)) {
16703                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16704                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16705                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16706                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16707                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16708                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16709                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16710                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16711                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16712                 } else
16713                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16714         }
16715
16716         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16717             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16718                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16719                 if (tp->phy_otp == 0)
16720                         tp->phy_otp = TG3_OTP_DEFAULT;
16721         }
16722
16723         if (tg3_flag(tp, CPMU_PRESENT))
16724                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16725         else
16726                 tp->mi_mode = MAC_MI_MODE_BASE;
16727
16728         tp->coalesce_mode = 0;
16729         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16730             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16731                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16732
16733         /* Set these bits to enable statistics workaround. */
16734         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16735             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16736             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16737             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16738                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16739                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16740         }
16741
16742         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16743             tg3_asic_rev(tp) == ASIC_REV_57780)
16744                 tg3_flag_set(tp, USE_PHYLIB);
16745
16746         err = tg3_mdio_init(tp);
16747         if (err)
16748                 return err;
16749
16750         /* Initialize data/descriptor byte/word swapping. */
16751         val = tr32(GRC_MODE);
16752         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16753             tg3_asic_rev(tp) == ASIC_REV_5762)
16754                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16755                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16756                         GRC_MODE_B2HRX_ENABLE |
16757                         GRC_MODE_HTX2B_ENABLE |
16758                         GRC_MODE_HOST_STACKUP);
16759         else
16760                 val &= GRC_MODE_HOST_STACKUP;
16761
16762         tw32(GRC_MODE, val | tp->grc_mode);
16763
16764         tg3_switch_clocks(tp);
16765
16766         /* Clear this out for sanity. */
16767         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16768
16769         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16770         tw32(TG3PCI_REG_BASE_ADDR, 0);
16771
16772         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16773                               &pci_state_reg);
16774         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16775             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16776                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16777                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16778                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16779                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16780                         void __iomem *sram_base;
16781
16782                         /* Write some dummy words into the SRAM status block
16783                          * area and see if they read back correctly.  If the
16784                          * value read back is bad, force-enable the PCIX workaround.
16785                          */
16786                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16787
16788                         writel(0x00000000, sram_base);
16789                         writel(0x00000000, sram_base + 4);
16790                         writel(0xffffffff, sram_base + 4);
16791                         if (readl(sram_base) != 0x00000000)
16792                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16793                 }
16794         }
16795
16796         udelay(50);
16797         tg3_nvram_init(tp);
16798
16799         /* If the device has an NVRAM, no need to load patch firmware */
16800         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16801             !tg3_flag(tp, NO_NVRAM))
16802                 tp->fw_needed = NULL;
16803
16804         grc_misc_cfg = tr32(GRC_MISC_CFG);
16805         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16806
16807         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16808             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16809              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16810                 tg3_flag_set(tp, IS_5788);
16811
16812         if (!tg3_flag(tp, IS_5788) &&
16813             tg3_asic_rev(tp) != ASIC_REV_5700)
16814                 tg3_flag_set(tp, TAGGED_STATUS);
16815         if (tg3_flag(tp, TAGGED_STATUS)) {
16816                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16817                                       HOSTCC_MODE_CLRTICK_TXBD);
16818
16819                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16820                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16821                                        tp->misc_host_ctrl);
16822         }
16823
16824         /* Preserve the APE MAC_MODE bits */
16825         if (tg3_flag(tp, ENABLE_APE))
16826                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16827         else
16828                 tp->mac_mode = 0;
16829
16830         if (tg3_10_100_only_device(tp, ent))
16831                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16832
16833         err = tg3_phy_probe(tp);
16834         if (err) {
16835                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16836                 /* ... but do not return immediately ... */
16837                 tg3_mdio_fini(tp);
16838         }
16839
16840         tg3_read_vpd(tp);
16841         tg3_read_fw_ver(tp);
16842
16843         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16844                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16845         } else {
16846                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16847                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16848                 else
16849                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16850         }
16851
16852         /* 5700 {AX,BX} chips have a broken status block link
16853          * change bit implementation, so we must use the
16854          * status register in those cases.
16855          */
16856         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16857                 tg3_flag_set(tp, USE_LINKCHG_REG);
16858         else
16859                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16860
16861         /* The led_ctrl is set during tg3_phy_probe; here we might
16862          * have to force the link status polling mechanism based
16863          * upon subsystem IDs.
16864          */
16865         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16866             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16867             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16868                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16869                 tg3_flag_set(tp, USE_LINKCHG_REG);
16870         }
16871
16872         /* For all SERDES we poll the MAC status register. */
16873         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16874                 tg3_flag_set(tp, POLL_SERDES);
16875         else
16876                 tg3_flag_clear(tp, POLL_SERDES);
16877
16878         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16879                 tg3_flag_set(tp, POLL_CPMU_LINK);
16880
16881         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16882         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16883         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16884             tg3_flag(tp, PCIX_MODE)) {
16885                 tp->rx_offset = NET_SKB_PAD;
16886 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16887                 tp->rx_copy_thresh = ~(u16)0;
16888 #endif
16889         }
16890
16891         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16892         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16893         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16894
16895         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16896
16897         /* Increment the rx prod index on the rx std ring by at most
16898          * 8 for these chips to work around hw errata.
16899          */
16900         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16901             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16902             tg3_asic_rev(tp) == ASIC_REV_5755)
16903                 tp->rx_std_max_post = 8;
16904
16905         if (tg3_flag(tp, ASPM_WORKAROUND))
16906                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16907                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16908
16909         return err;
16910 }
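/*
 * Illustrative aside (a sketch, not part of the driver): tg3_get_invariants()
 * stores each ring size minus one (tp->rx_std_ring_mask and friends above)
 * because the ring sizes are powers of two, so the hot path can wrap an
 * index with a bitwise AND instead of a modulo.  A minimal standalone C
 * example of the idiom; all "demo_" names are hypothetical:
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_RING_SIZE 512u                     /* must be a power of two */
#define DEMO_RING_MASK (DEMO_RING_SIZE - 1u)    /* 0x1ff */

static unsigned int demo_ring_advance(unsigned int idx)
{
        /* Equivalent to (idx + 1) % DEMO_RING_SIZE, but branch-free. */
        return (idx + 1u) & DEMO_RING_MASK;
}

int main(void)
{
        unsigned int idx = DEMO_RING_SIZE - 1u;

        assert(demo_ring_advance(idx) == 0u);   /* wraps back to slot 0 */
        printf("%u -> %u\n", idx, demo_ring_advance(idx));
        return 0;
}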
16911
16912 #ifdef CONFIG_SPARC
16913 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16914 {
16915         struct net_device *dev = tp->dev;
16916         struct pci_dev *pdev = tp->pdev;
16917         struct device_node *dp = pci_device_to_OF_node(pdev);
16918         const unsigned char *addr;
16919         int len;
16920
16921         addr = of_get_property(dp, "local-mac-address", &len);
16922         if (addr && len == ETH_ALEN) {
16923                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16924                 return 0;
16925         }
16926         return -ENODEV;
16927 }
16928
16929 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16930 {
16931         struct net_device *dev = tp->dev;
16932
16933         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16934         return 0;
16935 }
16936 #endif
16937
16938 static int tg3_get_device_address(struct tg3 *tp)
16939 {
16940         struct net_device *dev = tp->dev;
16941         u32 hi, lo, mac_offset;
16942         int addr_ok = 0;
16943         int err;
16944
16945 #ifdef CONFIG_SPARC
16946         if (!tg3_get_macaddr_sparc(tp))
16947                 return 0;
16948 #endif
16949
16950         if (tg3_flag(tp, IS_SSB_CORE)) {
16951                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16952                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16953                         return 0;
16954         }
16955
16956         mac_offset = 0x7c;
16957         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16958             tg3_flag(tp, 5780_CLASS)) {
16959                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16960                         mac_offset = 0xcc;
16961                 if (tg3_nvram_lock(tp))
16962                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16963                 else
16964                         tg3_nvram_unlock(tp);
16965         } else if (tg3_flag(tp, 5717_PLUS)) {
16966                 if (tp->pci_fn & 1)
16967                         mac_offset = 0xcc;
16968                 if (tp->pci_fn > 1)
16969                         mac_offset += 0x18c;
16970         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16971                 mac_offset = 0x10;
16972
16973         /* First try to get it from MAC address mailbox. */
16974         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16975         if ((hi >> 16) == 0x484b) {
16976                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16977                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16978
16979                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16980                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16981                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16982                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16983                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16984
16985                 /* Some old bootcode may report a 0 MAC address in SRAM */
16986                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16987         }
16988         if (!addr_ok) {
16989                 /* Next, try NVRAM. */
16990                 if (!tg3_flag(tp, NO_NVRAM) &&
16991                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16992                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16993                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16994                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16995                 }
16996                 /* Finally just fetch it out of the MAC control regs. */
16997                 else {
16998                         hi = tr32(MAC_ADDR_0_HIGH);
16999                         lo = tr32(MAC_ADDR_0_LOW);
17000
17001                         dev->dev_addr[5] = lo & 0xff;
17002                         dev->dev_addr[4] = (lo >> 8) & 0xff;
17003                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17004                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17005                         dev->dev_addr[1] = hi & 0xff;
17006                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17007                 }
17008         }
17009
17010         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17011 #ifdef CONFIG_SPARC
17012                 if (!tg3_get_default_macaddr_sparc(tp))
17013                         return 0;
17014 #endif
17015                 return -EINVAL;
17016         }
17017         return 0;
17018 }
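/*
 * Illustrative aside (a sketch, not part of the driver): the mailbox path
 * in tg3_get_device_address() accepts the SRAM words only when the top
 * half of the high word carries the 0x484b ("HK") signature, then unpacks
 * the six MAC bytes most-significant-byte first from the two 32-bit words.
 * A standalone C example of the same unpacking, using made-up values:
 */
#include <stdint.h>
#include <stdio.h>

static void demo_unpack_mac(uint32_t hi, uint32_t lo, uint8_t mac[6])
{
        mac[0] = (hi >>  8) & 0xff;
        mac[1] = (hi >>  0) & 0xff;
        mac[2] = (lo >> 24) & 0xff;
        mac[3] = (lo >> 16) & 0xff;
        mac[4] = (lo >>  8) & 0xff;
        mac[5] = (lo >>  0) & 0xff;
}

int main(void)
{
        uint32_t hi = 0x484b0010;       /* "HK" signature + first two bytes */
        uint32_t lo = 0x18aabbcc;       /* remaining four bytes */
        uint8_t mac[6];

        if ((hi >> 16) == 0x484b) {
                demo_unpack_mac(hi, lo, mac);
                printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        }
        return 0;                       /* prints 00:10:18:aa:bb:cc */
}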
17019
17020 #define BOUNDARY_SINGLE_CACHELINE       1
17021 #define BOUNDARY_MULTI_CACHELINE        2
17022
17023 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17024 {
17025         int cacheline_size;
17026         u8 byte;
17027         int goal;
17028
17029         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17030         if (byte == 0)
17031                 cacheline_size = 1024;
17032         else
17033                 cacheline_size = (int) byte * 4;
17034
17035         /* On 5703 and later chips, the boundary bits have no
17036          * effect.
17037          */
17038         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17039             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17040             !tg3_flag(tp, PCI_EXPRESS))
17041                 goto out;
17042
17043 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17044         goal = BOUNDARY_MULTI_CACHELINE;
17045 #else
17046 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17047         goal = BOUNDARY_SINGLE_CACHELINE;
17048 #else
17049         goal = 0;
17050 #endif
17051 #endif
17052
17053         if (tg3_flag(tp, 57765_PLUS)) {
17054                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17055                 goto out;
17056         }
17057
17058         if (!goal)
17059                 goto out;
17060
17061         /* PCI controllers on most RISC systems tend to disconnect
17062          * when a device tries to burst across a cache-line boundary.
17063          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17064          *
17065          * Unfortunately, for PCI-E there are only limited
17066          * write-side controls for this, and thus for reads
17067          * we will still get the disconnects.  We'll also waste
17068          * these PCI cycles for both read and write for chips
17069          * other than 5700 and 5701 which do not implement the
17070          * boundary bits.
17071          */
17072         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17073                 switch (cacheline_size) {
17074                 case 16:
17075                 case 32:
17076                 case 64:
17077                 case 128:
17078                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17079                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17080                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17081                         } else {
17082                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17083                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17084                         }
17085                         break;
17086
17087                 case 256:
17088                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17089                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17090                         break;
17091
17092                 default:
17093                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17094                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17095                         break;
17096                 }
17097         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17098                 switch (cacheline_size) {
17099                 case 16:
17100                 case 32:
17101                 case 64:
17102                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17103                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17104                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17105                                 break;
17106                         }
17107                         /* fallthrough */
17108                 case 128:
17109                 default:
17110                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17111                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17112                         break;
17113                 }
17114         } else {
17115                 switch (cacheline_size) {
17116                 case 16:
17117                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17118                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17119                                         DMA_RWCTRL_WRITE_BNDRY_16);
17120                                 break;
17121                         }
17122                         /* fallthrough */
17123                 case 32:
17124                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17125                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17126                                         DMA_RWCTRL_WRITE_BNDRY_32);
17127                                 break;
17128                         }
17129                         /* fallthrough */
17130                 case 64:
17131                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17132                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17133                                         DMA_RWCTRL_WRITE_BNDRY_64);
17134                                 break;
17135                         }
17136                         /* fallthrough */
17137                 case 128:
17138                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17139                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17140                                         DMA_RWCTRL_WRITE_BNDRY_128);
17141                                 break;
17142                         }
17143                         /* fallthrough */
17144                 case 256:
17145                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17146                                 DMA_RWCTRL_WRITE_BNDRY_256);
17147                         break;
17148                 case 512:
17149                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17150                                 DMA_RWCTRL_WRITE_BNDRY_512);
17151                         break;
17152                 case 1024:
17153                 default:
17154                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17155                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17156                         break;
17157                 }
17158         }
17159
17160 out:
17161         return val;
17162 }
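/*
 * Illustrative aside (a sketch, not part of the driver): the PCI cache
 * line size register read at the top of tg3_calc_dma_bndry() counts
 * 32-bit dwords, hence the "* 4"; a value of 0 (register never programmed)
 * is treated here as the most conservative 1024 bytes.  A standalone C
 * example of that decode:
 */
#include <stdint.h>
#include <stdio.h>

static int demo_cacheline_bytes(uint8_t cls_reg)
{
        /* cls_reg is in units of 32-bit dwords, as in PCI config space. */
        return cls_reg == 0 ? 1024 : (int)cls_reg * 4;
}

int main(void)
{
        printf("%d\n", demo_cacheline_bytes(16));   /* 16 dwords -> 64 */
        printf("%d\n", demo_cacheline_bytes(0));    /* unprogrammed -> 1024 */
        return 0;
}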
17163
17164 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17165                            int size, bool to_device)
17166 {
17167         struct tg3_internal_buffer_desc test_desc;
17168         u32 sram_dma_descs;
17169         int i, ret;
17170
17171         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17172
17173         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17174         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17175         tw32(RDMAC_STATUS, 0);
17176         tw32(WDMAC_STATUS, 0);
17177
17178         tw32(BUFMGR_MODE, 0);
17179         tw32(FTQ_RESET, 0);
17180
17181         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17182         test_desc.addr_lo = buf_dma & 0xffffffff;
17183         test_desc.nic_mbuf = 0x00002100;
17184         test_desc.len = size;
17185
17186         /*
17187          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17188          * the *second* time the tg3 driver was getting loaded after an
17189          * initial scan.
17190          *
17191          * Broadcom tells me:
17192          *   ...the DMA engine is connected to the GRC block and a DMA
17193          *   reset may affect the GRC block in some unpredictable way...
17194          *   The behavior of resets to individual blocks has not been tested.
17195          *
17196          * Broadcom noted the GRC reset will also reset all sub-components.
17197          */
17198         if (to_device) {
17199                 test_desc.cqid_sqid = (13 << 8) | 2;
17200
17201                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17202                 udelay(40);
17203         } else {
17204                 test_desc.cqid_sqid = (16 << 8) | 7;
17205
17206                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17207                 udelay(40);
17208         }
17209         test_desc.flags = 0x00000005;
17210
17211         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17212                 u32 val;
17213
17214                 val = *(((u32 *)&test_desc) + i);
17215                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17216                                        sram_dma_descs + (i * sizeof(u32)));
17217                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17218         }
17219         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17220
17221         if (to_device)
17222                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17223         else
17224                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17225
17226         ret = -ENODEV;
17227         for (i = 0; i < 40; i++) {
17228                 u32 val;
17229
17230                 if (to_device)
17231                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17232                 else
17233                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17234                 if ((val & 0xffff) == sram_dma_descs) {
17235                         ret = 0;
17236                         break;
17237                 }
17238
17239                 udelay(100);
17240         }
17241
17242         return ret;
17243 }
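/*
 * Illustrative aside (a sketch, not part of the driver): the completion
 * loop in tg3_do_test_dma() bounds the wait at 40 polls of 100us (about
 * 4ms) and falls back to -ENODEV on timeout.  A standalone C example of
 * the bounded-poll idiom, with the hardware register read stubbed out:
 */
#include <stdio.h>

#define DEMO_ENODEV 19                  /* stand-in for -ENODEV */

static unsigned int demo_polls;

static unsigned int demo_read_completion(void)
{
        /* Stand-in for tr32(): pretend the hardware finishes on poll 5. */
        return ++demo_polls >= 5 ? 0x1234u : 0u;
}

static int demo_wait_for_dma(unsigned int expected)
{
        int i;

        for (i = 0; i < 40; i++) {
                if ((demo_read_completion() & 0xffff) == expected)
                        return 0;
                /* the driver udelay(100)s here between polls */
        }
        return -DEMO_ENODEV;
}

int main(void)
{
        printf("result = %d\n", demo_wait_for_dma(0x1234u));    /* 0 */
        return 0;
}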
17244
17245 #define TEST_BUFFER_SIZE        0x2000
17246
17247 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17248         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17249         { },
17250 };
17251
17252 static int tg3_test_dma(struct tg3 *tp)
17253 {
17254         dma_addr_t buf_dma;
17255         u32 *buf, saved_dma_rwctrl;
17256         int ret = 0;
17257
17258         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17259                                  &buf_dma, GFP_KERNEL);
17260         if (!buf) {
17261                 ret = -ENOMEM;
17262                 goto out_nofree;
17263         }
17264
17265         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17266                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17267
17268         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17269
17270         if (tg3_flag(tp, 57765_PLUS))
17271                 goto out;
17272
17273         if (tg3_flag(tp, PCI_EXPRESS)) {
17274                 /* DMA read watermark not used on PCIE */
17275                 tp->dma_rwctrl |= 0x00180000;
17276         } else if (!tg3_flag(tp, PCIX_MODE)) {
17277                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17278                     tg3_asic_rev(tp) == ASIC_REV_5750)
17279                         tp->dma_rwctrl |= 0x003f0000;
17280                 else
17281                         tp->dma_rwctrl |= 0x003f000f;
17282         } else {
17283                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17284                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17285                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17286                         u32 read_water = 0x7;
17287
17288                         /* If the 5704 is behind the EPB bridge, we can
17289                          * do the less restrictive ONE_DMA workaround for
17290                          * better performance.
17291                          */
17292                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17293                             tg3_asic_rev(tp) == ASIC_REV_5704)
17294                                 tp->dma_rwctrl |= 0x8000;
17295                         else if (ccval == 0x6 || ccval == 0x7)
17296                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17297
17298                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17299                                 read_water = 4;
17300                         /* Set bit 23 to enable PCIX hw bug fix */
17301                         tp->dma_rwctrl |=
17302                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17303                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17304                                 (1 << 23);
17305                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17306                         /* 5780 always in PCIX mode */
17307                         tp->dma_rwctrl |= 0x00144000;
17308                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17309                         /* 5714 always in PCIX mode */
17310                         tp->dma_rwctrl |= 0x00148000;
17311                 } else {
17312                         tp->dma_rwctrl |= 0x001b000f;
17313                 }
17314         }
17315         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17316                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17317
17318         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17319             tg3_asic_rev(tp) == ASIC_REV_5704)
17320                 tp->dma_rwctrl &= 0xfffffff0;
17321
17322         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17323             tg3_asic_rev(tp) == ASIC_REV_5701) {
17324                 /* Remove this if it causes problems for some boards. */
17325                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17326
17327                 /* On 5700/5701 chips, we need to set this bit.
17328                  * Otherwise the chip will issue cacheline transactions
17329                  * to streamable DMA memory with not all the byte
17330                  * enables turned on.  This is an error on several
17331                  * RISC PCI controllers, in particular sparc64.
17332                  *
17333                  * On 5703/5704 chips, this bit has been reassigned
17334                  * a different meaning.  In particular, it is used
17335                  * on those chips to enable a PCI-X workaround.
17336                  */
17337                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17338         }
17339
17340         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17341
17342
17343         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17344             tg3_asic_rev(tp) != ASIC_REV_5701)
17345                 goto out;
17346
17347         /* It is best to perform DMA test with maximum write burst size
17348          * to expose the 5700/5701 write DMA bug.
17349          */
17350         saved_dma_rwctrl = tp->dma_rwctrl;
17351         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17352         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17353
17354         while (1) {
17355                 u32 *p = buf, i;
17356
17357                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17358                         p[i] = i;
17359
17360                 /* Send the buffer to the chip. */
17361                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17362                 if (ret) {
17363                         dev_err(&tp->pdev->dev,
17364                                 "%s: Buffer write failed. err = %d\n",
17365                                 __func__, ret);
17366                         break;
17367                 }
17368
17369                 /* Now read it back. */
17370                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17371                 if (ret) {
17372                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17373                                 "err = %d\n", __func__, ret);
17374                         break;
17375                 }
17376
17377                 /* Verify it. */
17378                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17379                         if (p[i] == i)
17380                                 continue;
17381
17382                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17383                             DMA_RWCTRL_WRITE_BNDRY_16) {
17384                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17385                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17386                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17387                                 break;
17388                         } else {
17389                                 dev_err(&tp->pdev->dev,
17390                                         "%s: Buffer corrupted on read back! "
17391                                         "(%d != %d)\n", __func__, p[i], i);
17392                                 ret = -ENODEV;
17393                                 goto out;
17394                         }
17395                 }
17396
17397                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17398                         /* Success. */
17399                         ret = 0;
17400                         break;
17401                 }
17402         }
17403         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17404             DMA_RWCTRL_WRITE_BNDRY_16) {
17405                 /* DMA test passed without adjusting DMA boundary,
17406                  * now look for chipsets that are known to expose the
17407                  * DMA bug without failing the test.
17408                  */
17409                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17410                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17411                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17412                 } else {
17413                         /* Safe to use the calculated DMA boundary. */
17414                         tp->dma_rwctrl = saved_dma_rwctrl;
17415                 }
17416
17417                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17418         }
17419
17420 out:
17421         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17422 out_nofree:
17423         return ret;
17424 }
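/*
 * Illustrative aside (a sketch, not part of the driver): tg3_test_dma()
 * fills the buffer with the pattern p[i] = i, DMAs it to the chip and
 * back, then verifies every word; on a mismatch it tightens the write
 * boundary to 16 bytes and retries rather than failing outright.  A
 * standalone C example of the fill-and-verify half:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_WORDS (0x2000 / sizeof(uint32_t))  /* mirrors TEST_BUFFER_SIZE */

static int demo_verify_pattern(const uint32_t *p)
{
        size_t i;

        for (i = 0; i < DEMO_WORDS; i++)
                if (p[i] != (uint32_t)i)
                        return -1;      /* caller retries with a tighter boundary */
        return 0;
}

int main(void)
{
        static uint32_t buf[DEMO_WORDS];
        size_t i;

        for (i = 0; i < DEMO_WORDS; i++)
                buf[i] = (uint32_t)i;
        printf("verify = %d\n", demo_verify_pattern(buf));      /* 0 */
        return 0;
}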
17425
17426 static void tg3_init_bufmgr_config(struct tg3 *tp)
17427 {
17428         if (tg3_flag(tp, 57765_PLUS)) {
17429                 tp->bufmgr_config.mbuf_read_dma_low_water =
17430                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17431                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17432                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17433                 tp->bufmgr_config.mbuf_high_water =
17434                         DEFAULT_MB_HIGH_WATER_57765;
17435
17436                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17437                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17438                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17439                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17440                 tp->bufmgr_config.mbuf_high_water_jumbo =
17441                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17442         } else if (tg3_flag(tp, 5705_PLUS)) {
17443                 tp->bufmgr_config.mbuf_read_dma_low_water =
17444                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17445                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17446                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17447                 tp->bufmgr_config.mbuf_high_water =
17448                         DEFAULT_MB_HIGH_WATER_5705;
17449                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17450                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17451                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17452                         tp->bufmgr_config.mbuf_high_water =
17453                                 DEFAULT_MB_HIGH_WATER_5906;
17454                 }
17455
17456                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17457                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17458                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17459                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17460                 tp->bufmgr_config.mbuf_high_water_jumbo =
17461                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17462         } else {
17463                 tp->bufmgr_config.mbuf_read_dma_low_water =
17464                         DEFAULT_MB_RDMA_LOW_WATER;
17465                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17466                         DEFAULT_MB_MACRX_LOW_WATER;
17467                 tp->bufmgr_config.mbuf_high_water =
17468                         DEFAULT_MB_HIGH_WATER;
17469
17470                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17471                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17472                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17473                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17474                 tp->bufmgr_config.mbuf_high_water_jumbo =
17475                         DEFAULT_MB_HIGH_WATER_JUMBO;
17476         }
17477
17478         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17479         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17480 }
17481
17482 static char *tg3_phy_string(struct tg3 *tp)
17483 {
17484         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17485         case TG3_PHY_ID_BCM5400:        return "5400";
17486         case TG3_PHY_ID_BCM5401:        return "5401";
17487         case TG3_PHY_ID_BCM5411:        return "5411";
17488         case TG3_PHY_ID_BCM5701:        return "5701";
17489         case TG3_PHY_ID_BCM5703:        return "5703";
17490         case TG3_PHY_ID_BCM5704:        return "5704";
17491         case TG3_PHY_ID_BCM5705:        return "5705";
17492         case TG3_PHY_ID_BCM5750:        return "5750";
17493         case TG3_PHY_ID_BCM5752:        return "5752";
17494         case TG3_PHY_ID_BCM5714:        return "5714";
17495         case TG3_PHY_ID_BCM5780:        return "5780";
17496         case TG3_PHY_ID_BCM5755:        return "5755";
17497         case TG3_PHY_ID_BCM5787:        return "5787";
17498         case TG3_PHY_ID_BCM5784:        return "5784";
17499         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17500         case TG3_PHY_ID_BCM5906:        return "5906";
17501         case TG3_PHY_ID_BCM5761:        return "5761";
17502         case TG3_PHY_ID_BCM5718C:       return "5718C";
17503         case TG3_PHY_ID_BCM5718S:       return "5718S";
17504         case TG3_PHY_ID_BCM57765:       return "57765";
17505         case TG3_PHY_ID_BCM5719C:       return "5719C";
17506         case TG3_PHY_ID_BCM5720C:       return "5720C";
17507         case TG3_PHY_ID_BCM5762:        return "5762C";
17508         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17509         case 0:                 return "serdes";
17510         default:                return "unknown";
17511         }
17512 }
17513
17514 static char *tg3_bus_string(struct tg3 *tp, char *str)
17515 {
17516         if (tg3_flag(tp, PCI_EXPRESS)) {
17517                 strcpy(str, "PCI Express");
17518                 return str;
17519         } else if (tg3_flag(tp, PCIX_MODE)) {
17520                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17521
17522                 strcpy(str, "PCIX:");
17523
17524                 if ((clock_ctrl == 7) ||
17525                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17526                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17527                         strcat(str, "133MHz");
17528                 else if (clock_ctrl == 0)
17529                         strcat(str, "33MHz");
17530                 else if (clock_ctrl == 2)
17531                         strcat(str, "50MHz");
17532                 else if (clock_ctrl == 4)
17533                         strcat(str, "66MHz");
17534                 else if (clock_ctrl == 6)
17535                         strcat(str, "100MHz");
17536         } else {
17537                 strcpy(str, "PCI:");
17538                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17539                         strcat(str, "66MHz");
17540                 else
17541                         strcat(str, "33MHz");
17542         }
17543         if (tg3_flag(tp, PCI_32BIT))
17544                 strcat(str, ":32-bit");
17545         else
17546                 strcat(str, ":64-bit");
17547         return str;
17548 }
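/*
 * Illustrative aside (a sketch, not part of the driver): the PCIX branch
 * of tg3_bus_string() maps the low clock-control field to a bus speed
 * (0 -> 33MHz, 2 -> 50MHz, 4 -> 66MHz, 6 -> 100MHz, 7 -> 133MHz); the
 * 5704CIOBE board-ID special case is omitted here.  The same decode as a
 * standalone C switch:
 */
#include <stdint.h>
#include <stdio.h>

static const char *demo_pcix_speed(uint32_t clock_ctrl)
{
        switch (clock_ctrl & 0x1f) {
        case 0:  return "33MHz";
        case 2:  return "50MHz";
        case 4:  return "66MHz";
        case 6:  return "100MHz";
        case 7:  return "133MHz";
        default: return "unknown";
        }
}

int main(void)
{
        printf("%s\n", demo_pcix_speed(6));     /* 100MHz */
        return 0;
}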
17549
17550 static void tg3_init_coal(struct tg3 *tp)
17551 {
17552         struct ethtool_coalesce *ec = &tp->coal;
17553
17554         memset(ec, 0, sizeof(*ec));
17555         ec->cmd = ETHTOOL_GCOALESCE;
17556         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17557         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17558         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17559         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17560         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17561         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17562         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17563         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17564         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17565
17566         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17567                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17568                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17569                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17570                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17571                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17572         }
17573
17574         if (tg3_flag(tp, 5705_PLUS)) {
17575                 ec->rx_coalesce_usecs_irq = 0;
17576                 ec->tx_coalesce_usecs_irq = 0;
17577                 ec->stats_block_coalesce_usecs = 0;
17578         }
17579 }
17580
17581 static int tg3_init_one(struct pci_dev *pdev,
17582                                   const struct pci_device_id *ent)
17583 {
17584         struct net_device *dev;
17585         struct tg3 *tp;
17586         int i, err;
17587         u32 sndmbx, rcvmbx, intmbx;
17588         char str[40];
17589         u64 dma_mask, persist_dma_mask;
17590         netdev_features_t features = 0;
17591
17592         printk_once(KERN_INFO "%s\n", version);
17593
17594         err = pci_enable_device(pdev);
17595         if (err) {
17596                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17597                 return err;
17598         }
17599
17600         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17601         if (err) {
17602                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17603                 goto err_out_disable_pdev;
17604         }
17605
17606         pci_set_master(pdev);
17607
17608         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17609         if (!dev) {
17610                 err = -ENOMEM;
17611                 goto err_out_free_res;
17612         }
17613
17614         SET_NETDEV_DEV(dev, &pdev->dev);
17615
17616         tp = netdev_priv(dev);
17617         tp->pdev = pdev;
17618         tp->dev = dev;
17619         tp->rx_mode = TG3_DEF_RX_MODE;
17620         tp->tx_mode = TG3_DEF_TX_MODE;
17621         tp->irq_sync = 1;
17622         tp->pcierr_recovery = false;
17623
17624         if (tg3_debug > 0)
17625                 tp->msg_enable = tg3_debug;
17626         else
17627                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17628
17629         if (pdev_is_ssb_gige_core(pdev)) {
17630                 tg3_flag_set(tp, IS_SSB_CORE);
17631                 if (ssb_gige_must_flush_posted_writes(pdev))
17632                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17633                 if (ssb_gige_one_dma_at_once(pdev))
17634                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17635                 if (ssb_gige_have_roboswitch(pdev)) {
17636                         tg3_flag_set(tp, USE_PHYLIB);
17637                         tg3_flag_set(tp, ROBOSWITCH);
17638                 }
17639                 if (ssb_gige_is_rgmii(pdev))
17640                         tg3_flag_set(tp, RGMII_MODE);
17641         }
17642
17643         /* The word/byte swap controls here control register access byte
17644          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17645          * setting below.
17646          */
17647         tp->misc_host_ctrl =
17648                 MISC_HOST_CTRL_MASK_PCI_INT |
17649                 MISC_HOST_CTRL_WORD_SWAP |
17650                 MISC_HOST_CTRL_INDIR_ACCESS |
17651                 MISC_HOST_CTRL_PCISTATE_RW;
17652
17653         /* The NONFRM (non-frame) byte/word swap controls take effect
17654          * on descriptor entries, anything which isn't packet data.
17655          *
17656          * The StrongARM chips on the board (one for tx, one for rx)
17657          * are running in big-endian mode.
17658          */
17659         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17660                         GRC_MODE_WSWAP_NONFRM_DATA);
17661 #ifdef __BIG_ENDIAN
17662         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17663 #endif
17664         spin_lock_init(&tp->lock);
17665         spin_lock_init(&tp->indirect_lock);
17666         INIT_WORK(&tp->reset_task, tg3_reset_task);
17667
17668         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17669         if (!tp->regs) {
17670                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17671                 err = -ENOMEM;
17672                 goto err_out_free_dev;
17673         }
17674
17675         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17676             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17677             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17678             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17679             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17680             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17681             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17682             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17683             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17684             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17685             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17686             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17687             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17688             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17689             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17690                 tg3_flag_set(tp, ENABLE_APE);
17691                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17692                 if (!tp->aperegs) {
17693                         dev_err(&pdev->dev,
17694                                 "Cannot map APE registers, aborting\n");
17695                         err = -ENOMEM;
17696                         goto err_out_iounmap;
17697                 }
17698         }
17699
17700         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17701         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17702
17703         dev->ethtool_ops = &tg3_ethtool_ops;
17704         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17705         dev->netdev_ops = &tg3_netdev_ops;
17706         dev->irq = pdev->irq;
17707
17708         err = tg3_get_invariants(tp, ent);
17709         if (err) {
17710                 dev_err(&pdev->dev,
17711                         "Problem fetching invariants of chip, aborting\n");
17712                 goto err_out_apeunmap;
17713         }
17714
17715         /* The EPB bridge inside 5714, 5715, and 5780 and any
17716          * device behind the EPB cannot support DMA addresses > 40-bit.
17717          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17718          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17719          * do DMA address check in tg3_start_xmit().
17720          */
17721         if (tg3_flag(tp, IS_5788))
17722                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17723         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17724                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17725 #ifdef CONFIG_HIGHMEM
17726                 dma_mask = DMA_BIT_MASK(64);
17727 #endif
17728         } else
17729                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17730
17731         /* Configure DMA attributes. */
17732         if (dma_mask > DMA_BIT_MASK(32)) {
17733                 err = pci_set_dma_mask(pdev, dma_mask);
17734                 if (!err) {
17735                         features |= NETIF_F_HIGHDMA;
17736                         err = pci_set_consistent_dma_mask(pdev,
17737                                                           persist_dma_mask);
17738                         if (err < 0) {
17739                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17740                                         "DMA for consistent allocations\n");
17741                                 goto err_out_apeunmap;
17742                         }
17743                 }
17744         }
17745         if (err || dma_mask == DMA_BIT_MASK(32)) {
17746                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17747                 if (err) {
17748                         dev_err(&pdev->dev,
17749                                 "No usable DMA configuration, aborting\n");
17750                         goto err_out_apeunmap;
17751                 }
17752         }
17753
17754         tg3_init_bufmgr_config(tp);
17755
17756         /* 5700 B0 chips do not support checksumming correctly due
17757          * to hardware bugs.
17758          */
17759         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17760                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17761
17762                 if (tg3_flag(tp, 5755_PLUS))
17763                         features |= NETIF_F_IPV6_CSUM;
17764         }
17765
17766         /* TSO is on by default on chips that support hardware TSO.
17767          * Firmware TSO on older chips gives lower performance, so it
17768          * is off by default, but can be enabled using ethtool.
17769          */
17770         if ((tg3_flag(tp, HW_TSO_1) ||
17771              tg3_flag(tp, HW_TSO_2) ||
17772              tg3_flag(tp, HW_TSO_3)) &&
17773             (features & NETIF_F_IP_CSUM))
17774                 features |= NETIF_F_TSO;
17775         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17776                 if (features & NETIF_F_IPV6_CSUM)
17777                         features |= NETIF_F_TSO6;
17778                 if (tg3_flag(tp, HW_TSO_3) ||
17779                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17780                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17781                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17782                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17783                     tg3_asic_rev(tp) == ASIC_REV_57780)
17784                         features |= NETIF_F_TSO_ECN;
17785         }
17786
17787         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17788                          NETIF_F_HW_VLAN_CTAG_RX;
17789         dev->vlan_features |= features;
17790
17791         /*
17792          * Add loopback capability only for a subset of devices that support
17793          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17794          * loopback for the remaining devices.
17795          */
17796         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17797             !tg3_flag(tp, CPMU_PRESENT))
17798                 /* Add the loopback capability */
17799                 features |= NETIF_F_LOOPBACK;
17800
17801         dev->hw_features |= features;
17802         dev->priv_flags |= IFF_UNICAST_FLT;
17803
17804         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17805         dev->min_mtu = TG3_MIN_MTU;
17806         dev->max_mtu = TG3_MAX_MTU(tp);
17807
17808         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17809             !tg3_flag(tp, TSO_CAPABLE) &&
17810             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17811                 tg3_flag_set(tp, MAX_RXPEND_64);
17812                 tp->rx_pending = 63;
17813         }
17814
17815         err = tg3_get_device_address(tp);
17816         if (err) {
17817                 dev_err(&pdev->dev,
17818                         "Could not obtain valid ethernet address, aborting\n");
17819                 goto err_out_apeunmap;
17820         }
17821
17822         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17823         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17824         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17825         for (i = 0; i < tp->irq_max; i++) {
17826                 struct tg3_napi *tnapi = &tp->napi[i];
17827
17828                 tnapi->tp = tp;
17829                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17830
17831                 tnapi->int_mbox = intmbx;
17832                 if (i <= 4)
17833                         intmbx += 0x8;
17834                 else
17835                         intmbx += 0x4;
17836
17837                 tnapi->consmbox = rcvmbx;
17838                 tnapi->prodmbox = sndmbx;
17839
17840                 if (i)
17841                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17842                 else
17843                         tnapi->coal_now = HOSTCC_MODE_NOW;
17844
17845                 if (!tg3_flag(tp, SUPPORT_MSIX))
17846                         break;
17847
17848                 /*
17849                  * If we support MSIX, we'll be using RSS.  If we're using
17850                  * RSS, the first vector only handles link interrupts and the
17851                  * remaining vectors handle rx and tx interrupts.  Reuse the
17852                  * mailbox values for the next iteration.  The values we set up
17853                  * above are still useful for the single vectored mode.
17854                  */
17855                 if (!i)
17856                         continue;
17857
17858                 rcvmbx += 0x8;
17859
17860                 if (sndmbx & 0x4)
17861                         sndmbx -= 0x4;
17862                 else
17863                         sndmbx += 0xc;
17864         }
17865
17866         /*
17867          * Reset the chip in case a UNDI or EFI driver did not shut down
17868          * DMA.  The DMA self test will enable WDMAC and we'll see (spurious)
17869          * pending DMA on the PCI bus at that point.
17870          */
17871         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17872             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17873                 tg3_full_lock(tp, 0);
17874                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17875                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17876                 tg3_full_unlock(tp);
17877         }
17878
17879         err = tg3_test_dma(tp);
17880         if (err) {
17881                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17882                 goto err_out_apeunmap;
17883         }
17884
17885         tg3_init_coal(tp);
17886
17887         pci_set_drvdata(pdev, dev);
17888
17889         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17890             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17891             tg3_asic_rev(tp) == ASIC_REV_5762)
17892                 tg3_flag_set(tp, PTP_CAPABLE);
17893
17894         tg3_timer_init(tp);
17895
17896         tg3_carrier_off(tp);
17897
17898         err = register_netdev(dev);
17899         if (err) {
17900                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17901                 goto err_out_apeunmap;
17902         }
17903
17904         if (tg3_flag(tp, PTP_CAPABLE)) {
17905                 tg3_ptp_init(tp);
17906                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17907                                                    &tp->pdev->dev);
17908                 if (IS_ERR(tp->ptp_clock))
17909                         tp->ptp_clock = NULL;
17910         }
17911
17912         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17913                     tp->board_part_number,
17914                     tg3_chip_rev_id(tp),
17915                     tg3_bus_string(tp, str),
17916                     dev->dev_addr);
17917
17918         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17919                 char *ethtype;
17920
17921                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17922                         ethtype = "10/100Base-TX";
17923                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17924                         ethtype = "1000Base-SX";
17925                 else
17926                         ethtype = "10/100/1000Base-T";
17927
17928                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17929                             "(WireSpeed[%d], EEE[%d])\n",
17930                             tg3_phy_string(tp), ethtype,
17931                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17932                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17933         }
17934
17935         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17936                     (dev->features & NETIF_F_RXCSUM) != 0,
17937                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17938                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17939                     tg3_flag(tp, ENABLE_ASF) != 0,
17940                     tg3_flag(tp, TSO_CAPABLE) != 0);
17941         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17942                     tp->dma_rwctrl,
17943                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17944                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17945
17946         pci_save_state(pdev);
17947
17948         return 0;
17949
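/* Error unwind: release resources in the reverse order they were acquired */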
17950 err_out_apeunmap:
17951         if (tp->aperegs) {
17952                 iounmap(tp->aperegs);
17953                 tp->aperegs = NULL;
17954         }
17955
17956 err_out_iounmap:
17957         if (tp->regs) {
17958                 iounmap(tp->regs);
17959                 tp->regs = NULL;
17960         }
17961
17962 err_out_free_dev:
17963         free_netdev(dev);
17964
17965 err_out_free_res:
17966         pci_release_regions(pdev);
17967
17968 err_out_disable_pdev:
17969         if (pci_is_enabled(pdev))
17970                 pci_disable_device(pdev);
17971         return err;
17972 }
17973
17974 static void tg3_remove_one(struct pci_dev *pdev)
17975 {
17976         struct net_device *dev = pci_get_drvdata(pdev);
17977
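        /*
         * Teardown mirrors probe in reverse: PTP, firmware, the reset
         * task and PHY/MDIO first, then the netdev itself, and finally
         * the MMIO mappings and PCI resources.
         */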
17978         if (dev) {
17979                 struct tg3 *tp = netdev_priv(dev);
17980
17981                 tg3_ptp_fini(tp);
17982
17983                 release_firmware(tp->fw);
17984
17985                 tg3_reset_task_cancel(tp);
17986
17987                 if (tg3_flag(tp, USE_PHYLIB)) {
17988                         tg3_phy_fini(tp);
17989                         tg3_mdio_fini(tp);
17990                 }
17991
17992                 unregister_netdev(dev);
17993                 if (tp->aperegs) {
17994                         iounmap(tp->aperegs);
17995                         tp->aperegs = NULL;
17996                 }
17997                 if (tp->regs) {
17998                         iounmap(tp->regs);
17999                         tp->regs = NULL;
18000                 }
18001                 free_netdev(dev);
18002                 pci_release_regions(pdev);
18003                 pci_disable_device(pdev);
18004         }
18005 }
18006
18007 #ifdef CONFIG_PM_SLEEP
18008 static int tg3_suspend(struct device *device)
18009 {
18010         struct pci_dev *pdev = to_pci_dev(device);
18011         struct net_device *dev = pci_get_drvdata(pdev);
18012         struct tg3 *tp = netdev_priv(dev);
18013         int err = 0;
18014
18015         rtnl_lock();
18016
18017         if (!netif_running(dev))
18018                 goto unlock;
18019
18020         tg3_reset_task_cancel(tp);
18021         tg3_phy_stop(tp);
18022         tg3_netif_stop(tp);
18023
18024         tg3_timer_stop(tp);
18025
18026         tg3_full_lock(tp, 1);
18027         tg3_disable_ints(tp);
18028         tg3_full_unlock(tp);
18029
18030         netif_device_detach(dev);
18031
18032         tg3_full_lock(tp, 0);
18033         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18034         tg3_flag_clear(tp, INIT_COMPLETE);
18035         tg3_full_unlock(tp);
18036
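        /*
         * If preparing the chip for power-down fails, undo the shutdown
         * above and bring the interface back up so it stays usable.
         */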
18037         err = tg3_power_down_prepare(tp);
18038         if (err) {
18039                 int err2;
18040
18041                 tg3_full_lock(tp, 0);
18042
18043                 tg3_flag_set(tp, INIT_COMPLETE);
18044                 err2 = tg3_restart_hw(tp, true);
18045                 if (err2)
18046                         goto out;
18047
18048                 tg3_timer_start(tp);
18049
18050                 netif_device_attach(dev);
18051                 tg3_netif_start(tp);
18052
18053 out:
18054                 tg3_full_unlock(tp);
18055
18056                 if (!err2)
18057                         tg3_phy_start(tp);
18058         }
18059
18060 unlock:
18061         rtnl_unlock();
18062         return err;
18063 }
18064
18065 static int tg3_resume(struct device *device)
18066 {
18067         struct pci_dev *pdev = to_pci_dev(device);
18068         struct net_device *dev = pci_get_drvdata(pdev);
18069         struct tg3 *tp = netdev_priv(dev);
18070         int err = 0;
18071
18072         rtnl_lock();
18073
18074         if (!netif_running(dev))
18075                 goto unlock;
18076
18077         netif_device_attach(dev);
18078
18079         tg3_full_lock(tp, 0);
18080
18081         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18082
18083         tg3_flag_set(tp, INIT_COMPLETE);
18084         err = tg3_restart_hw(tp,
18085                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18086         if (err)
18087                 goto out;
18088
18089         tg3_timer_start(tp);
18090
18091         tg3_netif_start(tp);
18092
18093 out:
18094         tg3_full_unlock(tp);
18095
18096         if (!err)
18097                 tg3_phy_start(tp);
18098
18099 unlock:
18100         rtnl_unlock();
18101         return err;
18102 }
18103 #endif /* CONFIG_PM_SLEEP */
18104
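/*
 * SIMPLE_DEV_PM_OPS builds a dev_pm_ops table that wires tg3_suspend and
 * tg3_resume into the system-sleep hooks; those references compile away
 * when CONFIG_PM_SLEEP is not set, matching the #ifdef above.
 */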
18105 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18106
18107 static void tg3_shutdown(struct pci_dev *pdev)
18108 {
18109         struct net_device *dev = pci_get_drvdata(pdev);
18110         struct tg3 *tp = netdev_priv(dev);
18111
18112         rtnl_lock();
18113         netif_device_detach(dev);
18114
18115         if (netif_running(dev))
18116                 dev_close(dev);
18117
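        /* Power the chip down only on a true power-off, not on a restart */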
18118         if (system_state == SYSTEM_POWER_OFF)
18119                 tg3_power_down(tp);
18120
18121         rtnl_unlock();
18122 }
18123
18124 /**
18125  * tg3_io_error_detected - called when a PCI error is detected
18126  * @pdev: Pointer to PCI device
18127  * @state: The current PCI connection state
18128  *
18129  * This function is called after a PCI bus error affecting
18130  * this device has been detected.
18131  */
18132 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18133                                               pci_channel_state_t state)
18134 {
18135         struct net_device *netdev = pci_get_drvdata(pdev);
18136         struct tg3 *tp = netdev_priv(netdev);
18137         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18138
18139         netdev_info(netdev, "PCI I/O error detected\n");
18140
18141         rtnl_lock();
18142
18143         /* Nothing to do until the netdev exists and is running */
18144         if (!netdev || !netif_running(netdev))
18145                 goto done;
18146
18147         /* A permanent error needs no recovery; only a frozen channel does */
18148         if (state == pci_channel_io_frozen)
18149                 tp->pcierr_recovery = true;
18150
18151         tg3_phy_stop(tp);
18152
18153         tg3_netif_stop(tp);
18154
18155         tg3_timer_stop(tp);
18156
18157         /* Want to make sure that the reset task doesn't run */
18158         tg3_reset_task_cancel(tp);
18159
18160         netif_device_detach(netdev);
18161
18162         /* Clean up software state, even if MMIO is blocked */
18163         tg3_full_lock(tp, 0);
18164         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18165         tg3_full_unlock(tp);
18166
18167 done:
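        /*
         * A permanent failure closes the device and reports DISCONNECT;
         * any other error leaves err at NEED_RESET and disables the PCI
         * device until the slot_reset callback re-enables it.
         */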
18168         if (state == pci_channel_io_perm_failure) {
18169                 if (netdev) {
18170                         tg3_napi_enable(tp);
18171                         dev_close(netdev);
18172                 }
18173                 err = PCI_ERS_RESULT_DISCONNECT;
18174         } else {
18175                 pci_disable_device(pdev);
18176         }
18177
18178         rtnl_unlock();
18179
18180         return err;
18181 }
18182
18183 /**
18184  * tg3_io_slot_reset - called after the PCI bus has been reset.
18185  * @pdev: Pointer to PCI device
18186  *
18187  * Restart the card from scratch, as if from a cold boot.
18188  * At this point, the card has experienced a hard reset,
18189  * followed by fixups by BIOS, and has its config space
18190  * set up identically to what it was at cold boot.
18191  */
18192 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18193 {
18194         struct net_device *netdev = pci_get_drvdata(pdev);
18195         struct tg3 *tp = netdev_priv(netdev);
18196         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18197         int err;
18198
18199         rtnl_lock();
18200
18201         if (pci_enable_device(pdev)) {
18202                 dev_err(&pdev->dev,
18203                         "Cannot re-enable PCI device after reset.\n");
18204                 goto done;
18205         }
18206
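        /*
         * Re-enable bus mastering and restore the config space snapshot
         * saved at probe time; save it again so a later reset can restore
         * from the same state.
         */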
18207         pci_set_master(pdev);
18208         pci_restore_state(pdev);
18209         pci_save_state(pdev);
18210
18211         if (!netdev || !netif_running(netdev)) {
18212                 rc = PCI_ERS_RESULT_RECOVERED;
18213                 goto done;
18214         }
18215
18216         err = tg3_power_up(tp);
18217         if (err)
18218                 goto done;
18219
18220         rc = PCI_ERS_RESULT_RECOVERED;
18221
18222 done:
18223         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18224                 tg3_napi_enable(tp);
18225                 dev_close(netdev);
18226         }
18227         rtnl_unlock();
18228
18229         return rc;
18230 }
18231
18232 /**
18233  * tg3_io_resume - called when traffic can start flowing again.
18234  * @pdev: Pointer to PCI device
18235  *
18236  * This callback is called when the error recovery driver tells
18237  * us that it's OK to resume normal operation.
18238  */
18239 static void tg3_io_resume(struct pci_dev *pdev)
18240 {
18241         struct net_device *netdev = pci_get_drvdata(pdev);
18242         struct tg3 *tp = netdev_priv(netdev);
18243         int err;
18244
18245         rtnl_lock();
18246
18247         if (!netdev || !netif_running(netdev))
18248                 goto done;
18249
18250         tg3_full_lock(tp, 0);
18251         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18252         tg3_flag_set(tp, INIT_COMPLETE);
18253         err = tg3_restart_hw(tp, true);
18254         if (err) {
18255                 tg3_full_unlock(tp);
18256                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18257                 goto done;
18258         }
18259
18260         netif_device_attach(netdev);
18261
18262         tg3_timer_start(tp);
18263
18264         tg3_netif_start(tp);
18265
18266         tg3_full_unlock(tp);
18267
18268         tg3_phy_start(tp);
18269
18270 done:
18271         tp->pcierr_recovery = false;
18272         rtnl_unlock();
18273 }
18274
18275 static const struct pci_error_handlers tg3_err_handler = {
18276         .error_detected = tg3_io_error_detected,
18277         .slot_reset     = tg3_io_slot_reset,
18278         .resume         = tg3_io_resume
18279 };
18280
18281 static struct pci_driver tg3_driver = {
18282         .name           = DRV_MODULE_NAME,
18283         .id_table       = tg3_pci_tbl,
18284         .probe          = tg3_init_one,
18285         .remove         = tg3_remove_one,
18286         .err_handler    = &tg3_err_handler,
18287         .driver.pm      = &tg3_pm_ops,
18288         .shutdown       = tg3_shutdown,
18289 };
18290
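/*
 * module_pci_driver() expands to the module init/exit boilerplate that
 * registers and unregisters tg3_driver with the PCI core.
 */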
18291 module_pci_driver(tg3_driver);