/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2016 Broadcom Corporation.
 *      Copyright (C) 2016-2017 Broadcom Ltd.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
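
/* Example: because TG3_TX_RING_SIZE is a power of two, the AND above is
 * an exact replacement for a modulo; NEXT_TX(511) == (512 & 511) == 0,
 * so the producer index wraps back to the start of the ring without a
 * hardware divide.
 */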

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
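
/* Usage sketch (not driver code; copy_into_new_skb() is a stand-in):
 * on the rx path, frames at or below the threshold are memcpy'd into a
 * fresh skb and the DMA buffer is recycled, roughly
 *
 *      if (len <= TG3_RX_COPY_THRESH(tp))
 *              copy_into_new_skb();
 *
 * On architectures with cheap unaligned access this is a compare
 * against the constant 256; otherwise tp->rx_copy_thresh is read so
 * the 5701/PCIX alignment workaround can raise it at runtime.
 */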

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
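
/* tg3_write_indirect_reg32() and tg3_read_indirect_reg32() above use
 * the standard PCI window pattern: the target offset is written to the
 * TG3PCI_REG_BASE_ADDR config word and the data then moves through
 * TG3PCI_REG_DATA, all under indirect_lock so a concurrent user cannot
 * retarget the window mid-access.  A minimal sketch of a read:
 *
 *      pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 *      pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
 */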

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
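
/* Example use of the waiting flavor, as in tg3_switch_clocks() below:
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * On the posted-write path this writes the register, delays, reads it
 * back to flush the write, then delays again so the full 40 usec
 * settling time is guaranteed either way.
 */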

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
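
/* Callers bracket hardware access with an acquire/release pair, as the
 * PHY helpers later in this file do:
 *
 *      tg3_ape_lock(tp, tp->phy_ape_lock);
 *      ... touch the MI registers ...
 *      tg3_ape_unlock(tp, tp->phy_ape_lock);
 *
 * tg3_ape_lock() polls the grant register for up to 1 ms and returns
 * -EBUSY (after revoking its request) if the APE firmware never grants
 * the lock.
 */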

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 milliseconds for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
                                   unsigned long interval)
{
        /* Only send a heartbeat once the interval has elapsed. */
        if (!tg3_flag(tp, ENABLE_APE) ||
            time_before(jiffies, tp->ape_hb_jiffies + interval))
                return;

        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
        tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
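
/* Interrupt mailbox convention used above: writing 0x00000001 to a
 * vector's int_mbox masks that vector, while writing last_tag << 24
 * (low bit clear) unmasks it and acknowledges status-block work up to
 * that tag.  A minimal sketch for vector 0:
 *
 *      tw32_mailbox_f(tp->napi[0].int_mbox, 0x00000001);        (mask)
 *      tw32_mailbox_f(tp->napi[0].int_mbox, last_tag << 24);    (unmask)
 */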

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
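
/* Example (mirroring tg3_bmcr_reset() below): a standard MII register
 * access through the MAC's MI interface:
 *
 *      u32 bmcr;
 *
 *      tg3_writephy(tp, MII_BMCR, BMCR_RESET);
 *      if (!tg3_readphy(tp, MII_BMCR, &bmcr) && !(bmcr & BMCR_RESET))
 *              ... the PHY has finished resetting ...
 */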

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
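
/* The two clause-45 helpers above tunnel MMD accesses through the
 * clause-22 registers: select the MMD device in MII_TG3_MMD_CTRL,
 * latch the register address in MII_TG3_MMD_ADDRESS, switch the
 * control register to no-post-increment data mode, then move the data
 * word through MII_TG3_MMD_ADDRESS.
 */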
1283
1284 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1285 {
1286         int err;
1287
1288         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1289         if (!err)
1290                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1291
1292         return err;
1293 }
1294
1295 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1296 {
1297         int err;
1298
1299         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1300         if (!err)
1301                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1302
1303         return err;
1304 }
1305
1306 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1307 {
1308         int err;
1309
1310         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1311                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1312                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1313         if (!err)
1314                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1315
1316         return err;
1317 }
1318
1319 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1320 {
1321         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1322                 set |= MII_TG3_AUXCTL_MISC_WREN;
1323
1324         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1325 }
1326
1327 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1328 {
1329         u32 val;
1330         int err;
1331
1332         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1333
1334         if (err)
1335                 return err;
1336
1337         if (enable)
1338                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1339         else
1340                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1341
1342         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1343                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1344
1345         return err;
1346 }
1347
1348 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1349 {
1350         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1351                             reg | val | MII_TG3_MISC_SHDW_WREN);
1352 }
1353
1354 static int tg3_bmcr_reset(struct tg3 *tp)
1355 {
1356         u32 phy_control;
1357         int limit, err;
1358
1359         /* OK, reset it, and poll the BMCR_RESET bit until it
1360          * clears or we time out.
1361          */
1362         phy_control = BMCR_RESET;
1363         err = tg3_writephy(tp, MII_BMCR, phy_control);
1364         if (err != 0)
1365                 return -EBUSY;
1366
1367         limit = 5000;
1368         while (limit--) {
1369                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1370                 if (err != 0)
1371                         return -EBUSY;
1372
1373                 if ((phy_control & BMCR_RESET) == 0) {
1374                         udelay(40);
1375                         break;
1376                 }
1377                 udelay(10);
1378         }
1379         if (limit < 0)
1380                 return -EBUSY;
1381
1382         return 0;
1383 }
1384
1385 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1386 {
1387         struct tg3 *tp = bp->priv;
1388         u32 val;
1389
1390         spin_lock_bh(&tp->lock);
1391
1392         if (__tg3_readphy(tp, mii_id, reg, &val))
1393                 val = -EIO;
1394
1395         spin_unlock_bh(&tp->lock);
1396
1397         return val;
1398 }
1399
1400 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1401 {
1402         struct tg3 *tp = bp->priv;
1403         u32 ret = 0;
1404
1405         spin_lock_bh(&tp->lock);
1406
1407         if (__tg3_writephy(tp, mii_id, reg, val))
1408                 ret = -EIO;
1409
1410         spin_unlock_bh(&tp->lock);
1411
1412         return ret;
1413 }
1414
1415 static void tg3_mdio_config_5785(struct tg3 *tp)
1416 {
1417         u32 val;
1418         struct phy_device *phydev;
1419
1420         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1421         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1422         case PHY_ID_BCM50610:
1423         case PHY_ID_BCM50610M:
1424                 val = MAC_PHYCFG2_50610_LED_MODES;
1425                 break;
1426         case PHY_ID_BCMAC131:
1427                 val = MAC_PHYCFG2_AC131_LED_MODES;
1428                 break;
1429         case PHY_ID_RTL8211C:
1430                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1431                 break;
1432         case PHY_ID_RTL8201E:
1433                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1434                 break;
1435         default:
1436                 return;
1437         }
1438
1439         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1440                 tw32(MAC_PHYCFG2, val);
1441
1442                 val = tr32(MAC_PHYCFG1);
1443                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1444                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1445                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1446                 tw32(MAC_PHYCFG1, val);
1447
1448                 return;
1449         }
1450
1451         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1452                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1453                        MAC_PHYCFG2_FMODE_MASK_MASK |
1454                        MAC_PHYCFG2_GMODE_MASK_MASK |
1455                        MAC_PHYCFG2_ACT_MASK_MASK   |
1456                        MAC_PHYCFG2_QUAL_MASK_MASK |
1457                        MAC_PHYCFG2_INBAND_ENABLE;
1458
1459         tw32(MAC_PHYCFG2, val);
1460
1461         val = tr32(MAC_PHYCFG1);
1462         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1463                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1464         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1465                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1466                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1467                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1468                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1469         }
1470         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1471                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1472         tw32(MAC_PHYCFG1, val);
1473
1474         val = tr32(MAC_EXT_RGMII_MODE);
1475         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1476                  MAC_RGMII_MODE_RX_QUALITY |
1477                  MAC_RGMII_MODE_RX_ACTIVITY |
1478                  MAC_RGMII_MODE_RX_ENG_DET |
1479                  MAC_RGMII_MODE_TX_ENABLE |
1480                  MAC_RGMII_MODE_TX_LOWPWR |
1481                  MAC_RGMII_MODE_TX_RESET);
1482         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1483                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1484                         val |= MAC_RGMII_MODE_RX_INT_B |
1485                                MAC_RGMII_MODE_RX_QUALITY |
1486                                MAC_RGMII_MODE_RX_ACTIVITY |
1487                                MAC_RGMII_MODE_RX_ENG_DET;
1488                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1489                         val |= MAC_RGMII_MODE_TX_ENABLE |
1490                                MAC_RGMII_MODE_TX_LOWPWR |
1491                                MAC_RGMII_MODE_TX_RESET;
1492         }
1493         tw32(MAC_EXT_RGMII_MODE, val);
1494 }
1495
1496 static void tg3_mdio_start(struct tg3 *tp)
1497 {
1498         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1499         tw32_f(MAC_MI_MODE, tp->mi_mode);
1500         udelay(80);
1501
1502         if (tg3_flag(tp, MDIOBUS_INITED) &&
1503             tg3_asic_rev(tp) == ASIC_REV_5785)
1504                 tg3_mdio_config_5785(tp);
1505 }
1506
1507 static int tg3_mdio_init(struct tg3 *tp)
1508 {
1509         int i;
1510         u32 reg;
1511         struct phy_device *phydev;
1512
1513         if (tg3_flag(tp, 5717_PLUS)) {
1514                 u32 is_serdes;
1515
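                /* On 5717-class devices each PCI function uses the PHY at
                 * MDIO address (function number + 1); serdes PHYs sit seven
                 * addresses above their copper counterparts.
                 */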
1516                 tp->phy_addr = tp->pci_fn + 1;
1517
1518                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1519                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1520                 else
1521                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1522                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1523                 if (is_serdes)
1524                         tp->phy_addr += 7;
1525         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1526                 int addr;
1527
1528                 addr = ssb_gige_get_phyaddr(tp->pdev);
1529                 if (addr < 0)
1530                         return addr;
1531                 tp->phy_addr = addr;
1532         } else
1533                 tp->phy_addr = TG3_PHY_MII_ADDR;
1534
1535         tg3_mdio_start(tp);
1536
1537         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1538                 return 0;
1539
1540         tp->mdio_bus = mdiobus_alloc();
1541         if (tp->mdio_bus == NULL)
1542                 return -ENOMEM;
1543
1544         tp->mdio_bus->name     = "tg3 mdio bus";
1545         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1546                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1547         tp->mdio_bus->priv     = tp;
1548         tp->mdio_bus->parent   = &tp->pdev->dev;
1549         tp->mdio_bus->read     = &tg3_mdio_read;
1550         tp->mdio_bus->write    = &tg3_mdio_write;
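        /* Probe only our own PHY address: phy_mask bits that are set tell
         * the MDIO core to skip that address during the bus scan.
         */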
1551         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1552
1553         /* The bus registration will look for all the PHYs on the mdio bus.
1554          * Unfortunately, it does not ensure the PHY is powered up before
1555          * accessing the PHY ID registers.  A PHY reset via the BMCR is the
1556          * quickest way to bring the device back to an operational state.
1557          */
1558         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1559                 tg3_bmcr_reset(tp);
1560
1561         i = mdiobus_register(tp->mdio_bus);
1562         if (i) {
1563                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1564                 mdiobus_free(tp->mdio_bus);
1565                 return i;
1566         }
1567
1568         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1569
1570         if (!phydev || !phydev->drv) {
1571                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1572                 mdiobus_unregister(tp->mdio_bus);
1573                 mdiobus_free(tp->mdio_bus);
1574                 return -ENODEV;
1575         }
1576
1577         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1578         case PHY_ID_BCM57780:
1579                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1580                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1581                 break;
1582         case PHY_ID_BCM50610:
1583         case PHY_ID_BCM50610M:
1584                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1585                                      PHY_BRCM_RX_REFCLK_UNUSED |
1586                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1587                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1588                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1589                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1590                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1591                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1592                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1593                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1594                 /* fallthru */
1595         case PHY_ID_RTL8211C:
1596                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1597                 break;
1598         case PHY_ID_RTL8201E:
1599         case PHY_ID_BCMAC131:
1600                 phydev->interface = PHY_INTERFACE_MODE_MII;
1601                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1602                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1603                 break;
1604         }
1605
1606         tg3_flag_set(tp, MDIOBUS_INITED);
1607
1608         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1609                 tg3_mdio_config_5785(tp);
1610
1611         return 0;
1612 }
1613
1614 static void tg3_mdio_fini(struct tg3 *tp)
1615 {
1616         if (tg3_flag(tp, MDIOBUS_INITED)) {
1617                 tg3_flag_clear(tp, MDIOBUS_INITED);
1618                 mdiobus_unregister(tp->mdio_bus);
1619                 mdiobus_free(tp->mdio_bus);
1620         }
1621 }
1622
1623 /* tp->lock is held. */
1624 static inline void tg3_generate_fw_event(struct tg3 *tp)
1625 {
1626         u32 val;
1627
1628         val = tr32(GRC_RX_CPU_EVENT);
1629         val |= GRC_RX_CPU_DRIVER_EVENT;
1630         tw32_f(GRC_RX_CPU_EVENT, val);
1631
1632         tp->last_event_jiffies = jiffies;
1633 }
1634
1635 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1636
1637 /* tp->lock is held. */
1638 static void tg3_wait_for_event_ack(struct tg3 *tp)
1639 {
1640         int i;
1641         unsigned int delay_cnt;
1642         long time_remain;
1643
1644         /* If enough time has passed, no wait is necessary. */
1645         time_remain = (long)(tp->last_event_jiffies + 1 +
1646                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1647                       (long)jiffies;
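        /* Signed jiffies arithmetic keeps this comparison correct even
         * when the jiffies counter wraps around.
         */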
1648         if (time_remain < 0)
1649                 return;
1650
1651         /* Check if we can shorten the wait time. */
1652         delay_cnt = jiffies_to_usecs(time_remain);
1653         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1654                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1655         delay_cnt = (delay_cnt >> 3) + 1;
1656
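        /* The loop below sleeps 8 usec per pass, so convert the remaining
         * wait time into a pass count (divide by 8, rounding up).
         */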
1657         for (i = 0; i < delay_cnt; i++) {
1658                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1659                         break;
1660                 if (pci_channel_offline(tp->pdev))
1661                         break;
1662
1663                 udelay(8);
1664         }
1665 }
1666
1667 /* tp->lock is held. */
1668 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1669 {
1670         u32 reg, val;
1671
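        /* Each word handed to the firmware packs two 16-bit MII registers:
         * the first register in the upper half, its companion in the lower.
         */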
1672         val = 0;
1673         if (!tg3_readphy(tp, MII_BMCR, &reg))
1674                 val = reg << 16;
1675         if (!tg3_readphy(tp, MII_BMSR, &reg))
1676                 val |= (reg & 0xffff);
1677         *data++ = val;
1678
1679         val = 0;
1680         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1681                 val = reg << 16;
1682         if (!tg3_readphy(tp, MII_LPA, &reg))
1683                 val |= (reg & 0xffff);
1684         *data++ = val;
1685
1686         val = 0;
1687         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1688                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1689                         val = reg << 16;
1690                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1691                         val |= (reg & 0xffff);
1692         }
1693         *data++ = val;
1694
1695         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1696                 val = reg << 16;
1697         else
1698                 val = 0;
1699         *data++ = val;
1700 }
1701
1702 /* tp->lock is held. */
1703 static void tg3_ump_link_report(struct tg3 *tp)
1704 {
1705         u32 data[4];
1706
1707         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1708                 return;
1709
1710         tg3_phy_gather_ump_data(tp, data);
1711
1712         tg3_wait_for_event_ack(tp);
1713
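        /* Hand the link state to the management firmware: command word,
         * payload length, the four data words, then ring the doorbell via
         * tg3_generate_fw_event().
         */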
1714         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1715         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1716         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1717         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1718         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1719         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1720
1721         tg3_generate_fw_event(tp);
1722 }
1723
1724 /* tp->lock is held. */
1725 static void tg3_stop_fw(struct tg3 *tp)
1726 {
1727         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1728                 /* Wait for RX cpu to ACK the previous event. */
1729                 tg3_wait_for_event_ack(tp);
1730
1731                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1732
1733                 tg3_generate_fw_event(tp);
1734
1735                 /* Wait for RX cpu to ACK this event. */
1736                 tg3_wait_for_event_ack(tp);
1737         }
1738 }
1739
1740 /* tp->lock is held. */
1741 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1742 {
1743         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1744                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1745
1746         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1747                 switch (kind) {
1748                 case RESET_KIND_INIT:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_START);
1751                         break;
1752
1753                 case RESET_KIND_SHUTDOWN:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_UNLOAD);
1756                         break;
1757
1758                 case RESET_KIND_SUSPEND:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_SUSPEND);
1761                         break;
1762
1763                 default:
1764                         break;
1765                 }
1766         }
1767 }
1768
1769 /* tp->lock is held. */
1770 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1771 {
1772         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1773                 switch (kind) {
1774                 case RESET_KIND_INIT:
1775                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1776                                       DRV_STATE_START_DONE);
1777                         break;
1778
1779                 case RESET_KIND_SHUTDOWN:
1780                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1781                                       DRV_STATE_UNLOAD_DONE);
1782                         break;
1783
1784                 default:
1785                         break;
1786                 }
1787         }
1788 }
1789
1790 /* tp->lock is held. */
1791 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1792 {
1793         if (tg3_flag(tp, ENABLE_ASF)) {
1794                 switch (kind) {
1795                 case RESET_KIND_INIT:
1796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797                                       DRV_STATE_START);
1798                         break;
1799
1800                 case RESET_KIND_SHUTDOWN:
1801                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1802                                       DRV_STATE_UNLOAD);
1803                         break;
1804
1805                 case RESET_KIND_SUSPEND:
1806                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1807                                       DRV_STATE_SUSPEND);
1808                         break;
1809
1810                 default:
1811                         break;
1812                 }
1813         }
1814 }
1815
1816 static int tg3_poll_fw(struct tg3 *tp)
1817 {
1818         int i;
1819         u32 val;
1820
1821         if (tg3_flag(tp, NO_FWARE_REPORTED))
1822                 return 0;
1823
1824         if (tg3_flag(tp, IS_SSB_CORE)) {
1825                 /* We don't use firmware. */
1826                 return 0;
1827         }
1828
1829         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1830                 /* Wait up to 20ms for init done. */
1831                 for (i = 0; i < 200; i++) {
1832                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1833                                 return 0;
1834                         if (pci_channel_offline(tp->pdev))
1835                                 return -ENODEV;
1836
1837                         udelay(100);
1838                 }
1839                 return -ENODEV;
1840         }
1841
1842         /* Wait for firmware initialization to complete. */
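        /* The boot code acknowledges by writing the one's complement of
         * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 back into the mailbox that
         * tg3_write_sig_pre_reset() seeded.
         */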
1843         for (i = 0; i < 100000; i++) {
1844                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1845                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1846                         break;
1847                 if (pci_channel_offline(tp->pdev)) {
1848                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1849                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1850                                 netdev_info(tp->dev, "No firmware running\n");
1851                         }
1852
1853                         break;
1854                 }
1855
1856                 udelay(10);
1857         }
1858
1859         /* Chip might not be fitted with firmware.  Some Sun onboard
1860          * parts are configured like that.  So don't signal the timeout
1861          * of the above loop as an error, but do report the lack of
1862          * running firmware once.
1863          */
1864         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1865                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1866
1867                 netdev_info(tp->dev, "No firmware running\n");
1868         }
1869
1870         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1871                 /* The 57765 A0 needs a little more
1872                  * time to do some important work.
1873                  */
1874                 mdelay(10);
1875         }
1876
1877         return 0;
1878 }
1879
1880 static void tg3_link_report(struct tg3 *tp)
1881 {
1882         if (!netif_carrier_ok(tp->dev)) {
1883                 netif_info(tp, link, tp->dev, "Link is down\n");
1884                 tg3_ump_link_report(tp);
1885         } else if (netif_msg_link(tp)) {
1886                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1887                             (tp->link_config.active_speed == SPEED_1000 ?
1888                              1000 :
1889                              (tp->link_config.active_speed == SPEED_100 ?
1890                               100 : 10)),
1891                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1892                              "full" : "half"));
1893
1894                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1895                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1896                             "on" : "off",
1897                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1898                             "on" : "off");
1899
1900                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1901                         netdev_info(tp->dev, "EEE is %s\n",
1902                                     tp->setlpicnt ? "enabled" : "disabled");
1903
1904                 tg3_ump_link_report(tp);
1905         }
1906
1907         tp->link_up = netif_carrier_ok(tp->dev);
1908 }
1909
1910 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1911 {
1912         u32 flowctrl = 0;
1913
1914         if (adv & ADVERTISE_PAUSE_CAP) {
1915                 flowctrl |= FLOW_CTRL_RX;
1916                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1917                         flowctrl |= FLOW_CTRL_TX;
1918         } else if (adv & ADVERTISE_PAUSE_ASYM)
1919                 flowctrl |= FLOW_CTRL_TX;
1920
1921         return flowctrl;
1922 }
1923
1924 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1925 {
1926         u16 miireg;
1927
1928         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1929                 miireg = ADVERTISE_1000XPAUSE;
1930         else if (flow_ctrl & FLOW_CTRL_TX)
1931                 miireg = ADVERTISE_1000XPSE_ASYM;
1932         else if (flow_ctrl & FLOW_CTRL_RX)
1933                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1934         else
1935                 miireg = 0;
1936
1937         return miireg;
1938 }
1939
1940 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1941 {
1942         u32 flowctrl = 0;
1943
1944         if (adv & ADVERTISE_1000XPAUSE) {
1945                 flowctrl |= FLOW_CTRL_RX;
1946                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1947                         flowctrl |= FLOW_CTRL_TX;
1948         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1949                 flowctrl |= FLOW_CTRL_TX;
1950
1951         return flowctrl;
1952 }
1953
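/* Resolve the pause outcome for 1000BASE-X autoneg (cf. IEEE 802.3
 * Annex 28B):
 *   both sides advertise PAUSE                 -> TX and RX pause
 *   both advertise ASYM, only we add PAUSE     -> RX pause only
 *   both advertise ASYM, only peer adds PAUSE  -> TX pause only
 *   anything else                              -> no pause
 */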
1954 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1955 {
1956         u8 cap = 0;
1957
1958         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1959                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1960         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1961                 if (lcladv & ADVERTISE_1000XPAUSE)
1962                         cap = FLOW_CTRL_RX;
1963                 if (rmtadv & ADVERTISE_1000XPAUSE)
1964                         cap = FLOW_CTRL_TX;
1965         }
1966
1967         return cap;
1968 }
1969
1970 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1971 {
1972         u8 autoneg;
1973         u8 flowctrl = 0;
1974         u32 old_rx_mode = tp->rx_mode;
1975         u32 old_tx_mode = tp->tx_mode;
1976
1977         if (tg3_flag(tp, USE_PHYLIB))
1978                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1979         else
1980                 autoneg = tp->link_config.autoneg;
1981
1982         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1983                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1984                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1985                 else
1986                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1987         } else
1988                 flowctrl = tp->link_config.flowctrl;
1989
1990         tp->link_config.active_flowctrl = flowctrl;
1991
1992         if (flowctrl & FLOW_CTRL_RX)
1993                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1994         else
1995                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1996
1997         if (old_rx_mode != tp->rx_mode)
1998                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1999
2000         if (flowctrl & FLOW_CTRL_TX)
2001                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2002         else
2003                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2004
2005         if (old_tx_mode != tp->tx_mode)
2006                 tw32_f(MAC_TX_MODE, tp->tx_mode);
2007 }
2008
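/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()): re-resolves the MAC port mode, duplex, and flow
 * control under tp->lock whenever the PHY reports a state change.
 */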
2009 static void tg3_adjust_link(struct net_device *dev)
2010 {
2011         u8 oldflowctrl, linkmesg = 0;
2012         u32 mac_mode, lcl_adv, rmt_adv;
2013         struct tg3 *tp = netdev_priv(dev);
2014         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2015
2016         spin_lock_bh(&tp->lock);
2017
2018         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2019                                     MAC_MODE_HALF_DUPLEX);
2020
2021         oldflowctrl = tp->link_config.active_flowctrl;
2022
2023         if (phydev->link) {
2024                 lcl_adv = 0;
2025                 rmt_adv = 0;
2026
2027                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2028                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2029                 else if (phydev->speed == SPEED_1000 ||
2030                          tg3_asic_rev(tp) != ASIC_REV_5785)
2031                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2032                 else
2033                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2034
2035                 if (phydev->duplex == DUPLEX_HALF)
2036                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2037                 else {
2038                         lcl_adv = mii_advertise_flowctrl(
2039                                   tp->link_config.flowctrl);
2040
2041                         if (phydev->pause)
2042                                 rmt_adv = LPA_PAUSE_CAP;
2043                         if (phydev->asym_pause)
2044                                 rmt_adv |= LPA_PAUSE_ASYM;
2045                 }
2046
2047                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2048         } else
2049                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2050
2051         if (mac_mode != tp->mac_mode) {
2052                 tp->mac_mode = mac_mode;
2053                 tw32_f(MAC_MODE, tp->mac_mode);
2054                 udelay(40);
2055         }
2056
2057         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2058                 if (phydev->speed == SPEED_10)
2059                         tw32(MAC_MI_STAT,
2060                              MAC_MI_STAT_10MBPS_MODE |
2061                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2062                 else
2063                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2064         }
2065
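        /* Half-duplex gigabit needs the extended slot time used by 802.3
         * carrier extension; full duplex and lower speeds use the
         * standard slot time.
         */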
2066         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2067                 tw32(MAC_TX_LENGTHS,
2068                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2069                       (6 << TX_LENGTHS_IPG_SHIFT) |
2070                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2071         else
2072                 tw32(MAC_TX_LENGTHS,
2073                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2074                       (6 << TX_LENGTHS_IPG_SHIFT) |
2075                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2076
2077         if (phydev->link != tp->old_link ||
2078             phydev->speed != tp->link_config.active_speed ||
2079             phydev->duplex != tp->link_config.active_duplex ||
2080             oldflowctrl != tp->link_config.active_flowctrl)
2081                 linkmesg = 1;
2082
2083         tp->old_link = phydev->link;
2084         tp->link_config.active_speed = phydev->speed;
2085         tp->link_config.active_duplex = phydev->duplex;
2086
2087         spin_unlock_bh(&tp->lock);
2088
2089         if (linkmesg)
2090                 tg3_link_report(tp);
2091 }
2092
2093 static int tg3_phy_init(struct tg3 *tp)
2094 {
2095         struct phy_device *phydev;
2096
2097         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2098                 return 0;
2099
2100         /* Bring the PHY back to a known state. */
2101         tg3_bmcr_reset(tp);
2102
2103         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2104
2105         /* Attach the MAC to the PHY. */
2106         phydev = phy_connect(tp->dev, phydev_name(phydev),
2107                              tg3_adjust_link, phydev->interface);
2108         if (IS_ERR(phydev)) {
2109                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2110                 return PTR_ERR(phydev);
2111         }
2112
2113         /* Mask with MAC supported features. */
2114         switch (phydev->interface) {
2115         case PHY_INTERFACE_MODE_GMII:
2116         case PHY_INTERFACE_MODE_RGMII:
2117                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2118                         phydev->supported &= (PHY_GBIT_FEATURES |
2119                                               SUPPORTED_Pause |
2120                                               SUPPORTED_Asym_Pause);
2121                         break;
2122                 }
2123                 /* fallthru */
2124         case PHY_INTERFACE_MODE_MII:
2125                 phydev->supported &= (PHY_BASIC_FEATURES |
2126                                       SUPPORTED_Pause |
2127                                       SUPPORTED_Asym_Pause);
2128                 break;
2129         default:
2130                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2131                 return -EINVAL;
2132         }
2133
2134         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2135
2136         phydev->advertising = phydev->supported;
2137
2138         phy_attached_info(phydev);
2139
2140         return 0;
2141 }
2142
2143 static void tg3_phy_start(struct tg3 *tp)
2144 {
2145         struct phy_device *phydev;
2146
2147         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2148                 return;
2149
2150         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2151
2152         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2153                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2154                 phydev->speed = tp->link_config.speed;
2155                 phydev->duplex = tp->link_config.duplex;
2156                 phydev->autoneg = tp->link_config.autoneg;
2157                 phydev->advertising = tp->link_config.advertising;
2158         }
2159
2160         phy_start(phydev);
2161
2162         phy_start_aneg(phydev);
2163 }
2164
2165 static void tg3_phy_stop(struct tg3 *tp)
2166 {
2167         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2168                 return;
2169
2170         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2171 }
2172
2173 static void tg3_phy_fini(struct tg3 *tp)
2174 {
2175         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2176                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2177                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2178         }
2179 }
2180
2181 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2182 {
2183         int err;
2184         u32 val;
2185
2186         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2187                 return 0;
2188
2189         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2190                 /* Cannot do read-modify-write on 5401 */
2191                 err = tg3_phy_auxctl_write(tp,
2192                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2193                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2194                                            0x4c20);
2195                 goto done;
2196         }
2197
2198         err = tg3_phy_auxctl_read(tp,
2199                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2200         if (err)
2201                 return err;
2202
2203         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2204         err = tg3_phy_auxctl_write(tp,
2205                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2206
2207 done:
2208         return err;
2209 }
2210
2211 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2212 {
2213         u32 phytest;
2214
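        /* The APD control on FET PHYs lives behind a shadow page: expose
         * it with MII_TG3_FET_SHADOW_EN, flip the bit, then restore the
         * original test-register value.
         */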
2215         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2216                 u32 phy;
2217
2218                 tg3_writephy(tp, MII_TG3_FET_TEST,
2219                              phytest | MII_TG3_FET_SHADOW_EN);
2220                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2221                         if (enable)
2222                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2223                         else
2224                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2225                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2226                 }
2227                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2228         }
2229 }
2230
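/* Enable or disable the PHY's auto power-down (APD) mode, in which the
 * PHY sleeps while no link partner is present and wakes periodically
 * (an 84 ms interval here) to look for energy on the wire.
 */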
2231 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2232 {
2233         u32 reg;
2234
2235         if (!tg3_flag(tp, 5705_PLUS) ||
2236             (tg3_flag(tp, 5717_PLUS) &&
2237              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2238                 return;
2239
2240         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2241                 tg3_phy_fet_toggle_apd(tp, enable);
2242                 return;
2243         }
2244
2245         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2246               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2247               MII_TG3_MISC_SHDW_SCR5_SDTL |
2248               MII_TG3_MISC_SHDW_SCR5_C125OE;
2249         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2250                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2251
2252         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2253
2254
2255         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2256         if (enable)
2257                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2258
2259         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2260 }
2261
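/* Enable or disable automatic MDI/MDI-X crossover detection. */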
2262 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2263 {
2264         u32 phy;
2265
2266         if (!tg3_flag(tp, 5705_PLUS) ||
2267             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2268                 return;
2269
2270         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2271                 u32 ephy;
2272
2273                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2274                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2275
2276                         tg3_writephy(tp, MII_TG3_FET_TEST,
2277                                      ephy | MII_TG3_FET_SHADOW_EN);
2278                         if (!tg3_readphy(tp, reg, &phy)) {
2279                                 if (enable)
2280                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2281                                 else
2282                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2283                                 tg3_writephy(tp, reg, phy);
2284                         }
2285                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2286                 }
2287         } else {
2288                 int ret;
2289
2290                 ret = tg3_phy_auxctl_read(tp,
2291                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2292                 if (!ret) {
2293                         if (enable)
2294                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2295                         else
2296                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2297                         tg3_phy_auxctl_write(tp,
2298                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2299                 }
2300         }
2301 }
2302
2303 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2304 {
2305         int ret;
2306         u32 val;
2307
2308         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2309                 return;
2310
2311         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2312         if (!ret)
2313                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2314                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2315 }
2316
2317 static void tg3_phy_apply_otp(struct tg3 *tp)
2318 {
2319         u32 otp, phy;
2320
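        /* tp->phy_otp holds the chip's one-time-programmable calibration
         * words; unpack them and program the corresponding PHY DSP taps
         * through the aux-control SMDSP window.
         */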
2321         if (!tp->phy_otp)
2322                 return;
2323
2324         otp = tp->phy_otp;
2325
2326         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2327                 return;
2328
2329         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2330         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2331         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2332
2333         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2334               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2335         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2336
2337         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2338         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2339         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2340
2341         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2342         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2343
2344         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2345         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2346
2347         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2348               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2349         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2350
2351         tg3_phy_toggle_auxctl_smdsp(tp, false);
2352 }
2353
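/* Snapshot the current Energy-Efficient Ethernet state (negotiated
 * status, advertisements, LPI settings) into @eee, or into tp->eee when
 * @eee is NULL.  The PHY EEE registers are reached through the clause 45
 * MMD access helpers.
 */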
2354 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2355 {
2356         u32 val;
2357         struct ethtool_eee *dest = &tp->eee;
2358
2359         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2360                 return;
2361
2362         if (eee)
2363                 dest = eee;
2364
2365         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2366                 return;
2367
2368         /* Pull eee_active */
2369         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2370             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2371                 dest->eee_active = 1;
2372         } else
2373                 dest->eee_active = 0;
2374
2375         /* Pull lp advertised settings */
2376         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2377                 return;
2378         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2379
2380         /* Pull advertised and eee_enabled settings */
2381         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2382                 return;
2383         dest->eee_enabled = !!val;
2384         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2385
2386         /* Pull tx_lpi_enabled */
2387         val = tr32(TG3_CPMU_EEE_MODE);
2388         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2389
2390         /* Pull lpi timer value */
2391         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2392 }
2393
2394 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2395 {
2396         u32 val;
2397
2398         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2399                 return;
2400
2401         tp->setlpicnt = 0;
2402
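        /* tp->setlpicnt is a countdown handled by the driver's periodic
         * timer elsewhere; LPI is only enabled once it reaches zero,
         * giving the freshly negotiated link a moment to settle first.
         */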
2403         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2404             current_link_up &&
2405             tp->link_config.active_duplex == DUPLEX_FULL &&
2406             (tp->link_config.active_speed == SPEED_100 ||
2407              tp->link_config.active_speed == SPEED_1000)) {
2408                 u32 eeectl;
2409
2410                 if (tp->link_config.active_speed == SPEED_1000)
2411                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2412                 else
2413                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2414
2415                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2416
2417                 tg3_eee_pull_config(tp, NULL);
2418                 if (tp->eee.eee_active)
2419                         tp->setlpicnt = 2;
2420         }
2421
2422         if (!tp->setlpicnt) {
2423                 if (current_link_up &&
2424                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2425                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2426                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2427                 }
2428
2429                 val = tr32(TG3_CPMU_EEE_MODE);
2430                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2431         }
2432 }
2433
2434 static void tg3_phy_eee_enable(struct tg3 *tp)
2435 {
2436         u32 val;
2437
2438         if (tp->link_config.active_speed == SPEED_1000 &&
2439             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2440              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2441              tg3_flag(tp, 57765_CLASS)) &&
2442             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2443                 val = MII_TG3_DSP_TAP26_ALNOKO |
2444                       MII_TG3_DSP_TAP26_RMRXSTO;
2445                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2446                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2447         }
2448
2449         val = tr32(TG3_CPMU_EEE_MODE);
2450         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2451 }
2452
2453 static int tg3_wait_macro_done(struct tg3 *tp)
2454 {
2455         int limit = 100;
2456
2457         while (limit--) {
2458                 u32 tmp32;
2459
2460                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2461                         if ((tmp32 & 0x1000) == 0)
2462                                 break;
2463                 }
2464         }
2465         if (limit < 0)
2466                 return -EBUSY;
2467
2468         return 0;
2469 }
2470
2471 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2472 {
2473         static const u32 test_pat[4][6] = {
2474         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2475         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2476         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2477         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2478         };
2479         int chan;
2480
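        /* For each of the four DSP channels: load the six-word test
         * pattern, latch it, read it back through the macro interface,
         * and compare.  Macro timeouts request a fresh PHY reset via
         * *resetp; a pattern mismatch simply fails the attempt.
         */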
2481         for (chan = 0; chan < 4; chan++) {
2482                 int i;
2483
2484                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2485                              (chan * 0x2000) | 0x0200);
2486                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2487
2488                 for (i = 0; i < 6; i++)
2489                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2490                                      test_pat[chan][i]);
2491
2492                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2493                 if (tg3_wait_macro_done(tp)) {
2494                         *resetp = 1;
2495                         return -EBUSY;
2496                 }
2497
2498                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2499                              (chan * 0x2000) | 0x0200);
2500                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2501                 if (tg3_wait_macro_done(tp)) {
2502                         *resetp = 1;
2503                         return -EBUSY;
2504                 }
2505
2506                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2507                 if (tg3_wait_macro_done(tp)) {
2508                         *resetp = 1;
2509                         return -EBUSY;
2510                 }
2511
2512                 for (i = 0; i < 6; i += 2) {
2513                         u32 low, high;
2514
2515                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2516                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2517                             tg3_wait_macro_done(tp)) {
2518                                 *resetp = 1;
2519                                 return -EBUSY;
2520                         }
2521                         low &= 0x7fff;
2522                         high &= 0x000f;
2523                         if (low != test_pat[chan][i] ||
2524                             high != test_pat[chan][i+1]) {
2525                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2526                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2527                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2528
2529                                 return -EBUSY;
2530                         }
2531                 }
2532         }
2533
2534         return 0;
2535 }
2536
2537 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2538 {
2539         int chan;
2540
2541         for (chan = 0; chan < 4; chan++) {
2542                 int i;
2543
2544                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2545                              (chan * 0x2000) | 0x0200);
2546                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2547                 for (i = 0; i < 6; i++)
2548                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2549                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2550                 if (tg3_wait_macro_done(tp))
2551                         return -EBUSY;
2552         }
2553
2554         return 0;
2555 }
2556
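/* PHY recovery for 5703/5704/5705: force 1000 Mbps full-duplex master
 * mode, block PHY control access, then write and verify the DSP channel
 * test patterns, retrying with a fresh BMCR reset until they stick.
 */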
2557 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2558 {
2559         u32 reg32, phy9_orig;
2560         int retries, do_phy_reset, err;
2561
2562         retries = 10;
2563         do_phy_reset = 1;
2564         do {
2565                 if (do_phy_reset) {
2566                         err = tg3_bmcr_reset(tp);
2567                         if (err)
2568                                 return err;
2569                         do_phy_reset = 0;
2570                 }
2571
2572                 /* Disable transmitter and interrupt.  */
2573                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2574                         continue;
2575
2576                 reg32 |= 0x3000;
2577                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2578
2579                 /* Set full-duplex, 1000 Mbps.  */
2580                 tg3_writephy(tp, MII_BMCR,
2581                              BMCR_FULLDPLX | BMCR_SPEED1000);
2582
2583                 /* Set to master mode.  */
2584                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2585                         continue;
2586
2587                 tg3_writephy(tp, MII_CTRL1000,
2588                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2589
2590                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2591                 if (err)
2592                         return err;
2593
2594                 /* Block the PHY control access.  */
2595                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2596
2597                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2598                 if (!err)
2599                         break;
2600         } while (--retries);
2601
2602         err = tg3_phy_reset_chanpat(tp);
2603         if (err)
2604                 return err;
2605
2606         tg3_phydsp_write(tp, 0x8005, 0x0000);
2607
2608         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2609         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2610
2611         tg3_phy_toggle_auxctl_smdsp(tp, false);
2612
2613         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2614
2615         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2616         if (err)
2617                 return err;
2618
2619         reg32 &= ~0x3000;
2620         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2621
2622         return 0;
2623 }
2624
2625 static void tg3_carrier_off(struct tg3 *tp)
2626 {
2627         netif_carrier_off(tp->dev);
2628         tp->link_up = false;
2629 }
2630
2631 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2632 {
2633         if (tg3_flag(tp, ENABLE_ASF))
2634                 netdev_warn(tp->dev,
2635                             "Management side-band traffic will be interrupted during phy settings change\n");
2636 }
2637
2638 /* This will always reset the tigon3 PHY and reapply the chip- and
2639  * PHY-specific workarounds that must follow a reset.
2640  */
2641 static int tg3_phy_reset(struct tg3 *tp)
2642 {
2643         u32 val, cpmuctrl;
2644         int err;
2645
2646         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2647                 val = tr32(GRC_MISC_CFG);
2648                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2649                 udelay(40);
2650         }
2651         err  = tg3_readphy(tp, MII_BMSR, &val);
2652         err |= tg3_readphy(tp, MII_BMSR, &val);
2653         if (err != 0)
2654                 return -EBUSY;
2655
2656         if (netif_running(tp->dev) && tp->link_up) {
2657                 netif_carrier_off(tp->dev);
2658                 tg3_link_report(tp);
2659         }
2660
2661         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2662             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2663             tg3_asic_rev(tp) == ASIC_REV_5705) {
2664                 err = tg3_phy_reset_5703_4_5(tp);
2665                 if (err)
2666                         return err;
2667                 goto out;
2668         }
2669
2670         cpmuctrl = 0;
2671         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2672             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2673                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2674                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2675                         tw32(TG3_CPMU_CTRL,
2676                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2677         }
2678
2679         err = tg3_bmcr_reset(tp);
2680         if (err)
2681                 return err;
2682
2683         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2684                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2685                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2686
2687                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2688         }
2689
2690         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2691             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2692                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2693                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2694                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2695                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2696                         udelay(40);
2697                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2698                 }
2699         }
2700
2701         if (tg3_flag(tp, 5717_PLUS) &&
2702             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2703                 return 0;
2704
2705         tg3_phy_apply_otp(tp);
2706
2707         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2708                 tg3_phy_toggle_apd(tp, true);
2709         else
2710                 tg3_phy_toggle_apd(tp, false);
2711
2712 out:
2713         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2714             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2716                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2717                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2718         }
2719
2720         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2721                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2722                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2723         }
2724
2725         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2726                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2727                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2728                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2729                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2730                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2731                 }
2732         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2733                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2734                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2735                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2736                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2737                                 tg3_writephy(tp, MII_TG3_TEST1,
2738                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2739                         } else
2740                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2741
2742                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2743                 }
2744         }
2745
2746         /* Set Extended packet length bit (bit 14) on all chips that
2747          * support jumbo frames. */
2748         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2749                 /* Cannot do read-modify-write on 5401 */
2750                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2751         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752                 /* Set bit 14 with read-modify-write to preserve other bits */
2753                 err = tg3_phy_auxctl_read(tp,
2754                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2755                 if (!err)
2756                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2757                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2758         }
2759
2760         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2761          * transmission of jumbo frames.
2762          */
2763         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2764                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2765                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2766                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2767         }
2768
2769         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2770                 /* adjust output voltage */
2771                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2772         }
2773
2774         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2775                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2776
2777         tg3_phy_toggle_automdix(tp, true);
2778         tg3_phy_set_wirespeed(tp);
2779         return 0;
2780 }
2781
2782 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2783 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2784 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2785                                           TG3_GPIO_MSG_NEED_VAUX)
2786 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2787         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2788          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2789          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2790          (TG3_GPIO_MSG_DRVR_PRES << 12))
2791
2792 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2793         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2794          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2795          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2796          (TG3_GPIO_MSG_NEED_VAUX << 12))
2797
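/* Each PCI function owns a 4-bit nibble in the shared GPIO status word
 * (shift = TG3_APE_GPIO_MSG_SHIFT + 4 * function).  Update our nibble to
 * @newstat and return the whole word so callers can inspect the other
 * functions' state too.
 */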
2798 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2799 {
2800         u32 status, shift;
2801
2802         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2803             tg3_asic_rev(tp) == ASIC_REV_5719)
2804                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2805         else
2806                 status = tr32(TG3_CPMU_DRV_STATUS);
2807
2808         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2809         status &= ~(TG3_GPIO_MSG_MASK << shift);
2810         status |= (newstat << shift);
2811
2812         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2813             tg3_asic_rev(tp) == ASIC_REV_5719)
2814                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2815         else
2816                 tw32(TG3_CPMU_DRV_STATUS, status);
2817
2818         return status >> TG3_APE_GPIO_MSG_SHIFT;
2819 }
2820
2821 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2822 {
2823         if (!tg3_flag(tp, IS_NIC))
2824                 return 0;
2825
2826         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2827             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2828             tg3_asic_rev(tp) == ASIC_REV_5720) {
2829                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2830                         return -EIO;
2831
2832                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2833
2834                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2835                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2836
2837                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2838         } else {
2839                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2840                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2841         }
2842
2843         return 0;
2844 }
2845
2846 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2847 {
2848         u32 grc_local_ctrl;
2849
2850         if (!tg3_flag(tp, IS_NIC) ||
2851             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2852             tg3_asic_rev(tp) == ASIC_REV_5701)
2853                 return;
2854
2855         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2856
2857         tw32_wait_f(GRC_LOCAL_CTRL,
2858                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2859                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2860
2861         tw32_wait_f(GRC_LOCAL_CTRL,
2862                     grc_local_ctrl,
2863                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2864
2865         tw32_wait_f(GRC_LOCAL_CTRL,
2866                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2867                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2868 }
2869
2870 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2871 {
2872         if (!tg3_flag(tp, IS_NIC))
2873                 return;
2874
2875         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2876             tg3_asic_rev(tp) == ASIC_REV_5701) {
2877                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2878                             (GRC_LCLCTRL_GPIO_OE0 |
2879                              GRC_LCLCTRL_GPIO_OE1 |
2880                              GRC_LCLCTRL_GPIO_OE2 |
2881                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2882                              GRC_LCLCTRL_GPIO_OUTPUT1),
2883                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2884         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2885                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2886                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2887                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2888                                      GRC_LCLCTRL_GPIO_OE1 |
2889                                      GRC_LCLCTRL_GPIO_OE2 |
2890                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2891                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2892                                      tp->grc_local_ctrl;
2893                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2894                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2895
2896                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2897                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2898                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2899
2900                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2901                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2902                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2903         } else {
2904                 u32 no_gpio2;
2905                 u32 grc_local_ctrl = 0;
2906
2907                 /* Workaround to keep the board from drawing too much current. */
2908                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2909                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2910                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2911                                     grc_local_ctrl,
2912                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2913                 }
2914
2915                 /* On 5753 and variants, GPIO2 cannot be used. */
2916                 no_gpio2 = tp->nic_sram_data_cfg &
2917                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2918
2919                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2920                                   GRC_LCLCTRL_GPIO_OE1 |
2921                                   GRC_LCLCTRL_GPIO_OE2 |
2922                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2923                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2924                 if (no_gpio2) {
2925                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2926                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2927                 }
2928                 tw32_wait_f(GRC_LOCAL_CTRL,
2929                             tp->grc_local_ctrl | grc_local_ctrl,
2930                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2931
2932                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2933
2934                 tw32_wait_f(GRC_LOCAL_CTRL,
2935                             tp->grc_local_ctrl | grc_local_ctrl,
2936                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2937
2938                 if (!no_gpio2) {
2939                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2940                         tw32_wait_f(GRC_LOCAL_CTRL,
2941                                     tp->grc_local_ctrl | grc_local_ctrl,
2942                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2943                 }
2944         }
2945 }
2946
2947 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2948 {
2949         u32 msg = 0;
2950
2951         /* Serialize power state transitions */
2952         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2953                 return;
2954
2955         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2956                 msg = TG3_GPIO_MSG_NEED_VAUX;
2957
2958         msg = tg3_set_function_status(tp, msg);
2959
2960         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2961                 goto done;
2962
2963         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2964                 tg3_pwrsrc_switch_to_vaux(tp);
2965         else
2966                 tg3_pwrsrc_die_with_vmain(tp);
2967
2968 done:
2969         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2970 }
2971
2972 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2973 {
2974         bool need_vaux = false;
2975
2976         /* The GPIOs do something completely different on 57765. */
2977         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2978                 return;
2979
2980         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2981             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2982             tg3_asic_rev(tp) == ASIC_REV_5720) {
2983                 tg3_frob_aux_power_5717(tp, include_wol ?
2984                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2985                 return;
2986         }
2987
2988         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2989                 struct net_device *dev_peer;
2990
2991                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2992
2993                 /* remove_one() may have been run on the peer. */
2994                 if (dev_peer) {
2995                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2996
2997                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2998                                 return;
2999
3000                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3001                             tg3_flag(tp_peer, ENABLE_ASF))
3002                                 need_vaux = true;
3003                 }
3004         }
3005
3006         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3007             tg3_flag(tp, ENABLE_ASF))
3008                 need_vaux = true;
3009
3010         if (need_vaux)
3011                 tg3_pwrsrc_switch_to_vaux(tp);
3012         else
3013                 tg3_pwrsrc_die_with_vmain(tp);
3014 }
3015
3016 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3017 {
3018         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3019                 return 1;
3020         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3021                 if (speed != SPEED_10)
3022                         return 1;
3023         } else if (speed == SPEED_10)
3024                 return 1;
3025
3026         return 0;
3027 }
3028
3029 static bool tg3_phy_power_bug(struct tg3 *tp)
3030 {
3031         switch (tg3_asic_rev(tp)) {
3032         case ASIC_REV_5700:
3033         case ASIC_REV_5704:
3034                 return true;
3035         case ASIC_REV_5780:
3036                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3037                         return true;
3038                 return false;
3039         case ASIC_REV_5717:
3040                 if (!tp->pci_fn)
3041                         return true;
3042                 return false;
3043         case ASIC_REV_5719:
3044         case ASIC_REV_5720:
3045                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3046                     !tp->pci_fn)
3047                         return true;
3048                 return false;
3049         }
3050
3051         return false;
3052 }
3053
3054 static bool tg3_phy_led_bug(struct tg3 *tp)
3055 {
3056         switch (tg3_asic_rev(tp)) {
3057         case ASIC_REV_5719:
3058         case ASIC_REV_5720:
3059                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3060                     !tp->pci_fn)
3061                         return true;
3062                 return false;
3063         }
3064
3065         return false;
3066 }
3067
3068 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3069 {
3070         u32 val;
3071
3072         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3073                 return;
3074
3075         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3076                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3077                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3078                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3079
3080                         sg_dig_ctrl |=
3081                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3082                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3083                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3084                 }
3085                 return;
3086         }
3087
3088         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3089                 tg3_bmcr_reset(tp);
3090                 val = tr32(GRC_MISC_CFG);
3091                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3092                 udelay(40);
3093                 return;
3094         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3095                 u32 phytest;
3096                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3097                         u32 phy;
3098
3099                         tg3_writephy(tp, MII_ADVERTISE, 0);
3100                         tg3_writephy(tp, MII_BMCR,
3101                                      BMCR_ANENABLE | BMCR_ANRESTART);
3102
3103                         tg3_writephy(tp, MII_TG3_FET_TEST,
3104                                      phytest | MII_TG3_FET_SHADOW_EN);
3105                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3106                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3107                                 tg3_writephy(tp,
3108                                              MII_TG3_FET_SHDW_AUXMODE4,
3109                                              phy);
3110                         }
3111                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3112                 }
3113                 return;
3114         } else if (do_low_power) {
3115                 if (!tg3_phy_led_bug(tp))
3116                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3117                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3118
3119                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3120                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3121                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3122                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3123         }
3124
3125         /* The PHY should not be powered down on some chips because
3126          * of bugs.
3127          */
3128         if (tg3_phy_power_bug(tp))
3129                 return;
3130
3131         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3132             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3133                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3134                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3135                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3136                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3137         }
3138
3139         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3140 }
3141
3142 /* tp->lock is held. */
3143 static int tg3_nvram_lock(struct tg3 *tp)
3144 {
3145         if (tg3_flag(tp, NVRAM)) {
3146                 int i;
3147
3148                 if (tp->nvram_lock_cnt == 0) {
3149                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3150                         for (i = 0; i < 8000; i++) {
3151                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3152                                         break;
3153                                 udelay(20);
3154                         }
3155                         if (i == 8000) {
3156                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3157                                 return -ENODEV;
3158                         }
3159                 }
3160                 tp->nvram_lock_cnt++;
3161         }
3162         return 0;
3163 }
3164
3165 /* tp->lock is held. */
3166 static void tg3_nvram_unlock(struct tg3 *tp)
3167 {
3168         if (tg3_flag(tp, NVRAM)) {
3169                 if (tp->nvram_lock_cnt > 0)
3170                         tp->nvram_lock_cnt--;
3171                 if (tp->nvram_lock_cnt == 0)
3172                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3173         }
3174 }
3175
3176 /* tp->lock is held. */
3177 static void tg3_enable_nvram_access(struct tg3 *tp)
3178 {
3179         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3180                 u32 nvaccess = tr32(NVRAM_ACCESS);
3181
3182                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3183         }
3184 }
3185
3186 /* tp->lock is held. */
3187 static void tg3_disable_nvram_access(struct tg3 *tp)
3188 {
3189         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3190                 u32 nvaccess = tr32(NVRAM_ACCESS);
3191
3192                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3193         }
3194 }
3195
3196 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3197                                         u32 offset, u32 *val)
3198 {
3199         u32 tmp;
3200         int i;
3201
3202         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3203                 return -EINVAL;
3204
3205         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3206                                         EEPROM_ADDR_DEVID_MASK |
3207                                         EEPROM_ADDR_READ);
3208         tw32(GRC_EEPROM_ADDR,
3209              tmp |
3210              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3211              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3212               EEPROM_ADDR_ADDR_MASK) |
3213              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3214
3215         for (i = 0; i < 1000; i++) {
3216                 tmp = tr32(GRC_EEPROM_ADDR);
3217
3218                 if (tmp & EEPROM_ADDR_COMPLETE)
3219                         break;
3220                 msleep(1);
3221         }
3222         if (!(tmp & EEPROM_ADDR_COMPLETE))
3223                 return -EBUSY;
3224
3225         tmp = tr32(GRC_EEPROM_DATA);
3226
3227         /*
3228          * The data will always be opposite the native endian
3229          * format.  Perform a blind byteswap to compensate.
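              * Illustrative example: a raw register value of 0x11223344
              * is stored as swab32(0x11223344) == 0x44332211, restoring
              * the value to native byte order.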
3230          */
3231         *val = swab32(tmp);
3232
3233         return 0;
3234 }
3235
3236 #define NVRAM_CMD_TIMEOUT 10000
3237
3238 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3239 {
3240         int i;
3241
3242         tw32(NVRAM_CMD, nvram_cmd);
3243         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3244                 usleep_range(10, 40);
3245                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3246                         udelay(10);
3247                         break;
3248                 }
3249         }
3250
3251         if (i == NVRAM_CMD_TIMEOUT)
3252                 return -EBUSY;
3253
3254         return 0;
3255 }
3256
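     /* Translate a linear NVRAM offset into the paged form used by Atmel
      * AT45DB-style flashes, which place each page on a power-of-two
      * boundary.  Illustrative example, assuming a 264-byte page and
      * ATMEL_AT45DB0X1B_PAGE_POS == 9: linear offset 300 is byte 36 of
      * page 1, so the translated address is (1 << 9) + 36.
      */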
3257 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3258 {
3259         if (tg3_flag(tp, NVRAM) &&
3260             tg3_flag(tp, NVRAM_BUFFERED) &&
3261             tg3_flag(tp, FLASH) &&
3262             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3263             (tp->nvram_jedecnum == JEDEC_ATMEL))
3264
3265                 addr = ((addr / tp->nvram_pagesize) <<
3266                         ATMEL_AT45DB0X1B_PAGE_POS) +
3267                        (addr % tp->nvram_pagesize);
3268
3269         return addr;
3270 }
3271
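     /* Inverse of tg3_nvram_phys_addr(): fold a paged address back into
      * a linear offset.  With the same illustrative 264-byte page
      * geometry, (1 << 9) + 36 maps back to 1 * 264 + 36 == 300.
      */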
3272 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3273 {
3274         if (tg3_flag(tp, NVRAM) &&
3275             tg3_flag(tp, NVRAM_BUFFERED) &&
3276             tg3_flag(tp, FLASH) &&
3277             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3278             (tp->nvram_jedecnum == JEDEC_ATMEL))
3279
3280                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3281                         tp->nvram_pagesize) +
3282                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3283
3284         return addr;
3285 }
3286
3287 /* NOTE: Data read in from NVRAM is byteswapped according to
3288  * the byteswapping settings for all other register accesses.
3289  * tg3 devices are BE devices, so on a BE machine, the data
3290  * returned will be exactly as it is seen in NVRAM.  On a LE
3291  * machine, the 32-bit value will be byteswapped.
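      *
      * Illustrative example: if NVRAM holds the bytes de ad be ef at the
      * requested offset, a big-endian host sees *val == 0xdeadbeef while
      * a little-endian host sees *val == 0xefbeadde.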
3292  */
3293 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3294 {
3295         int ret;
3296
3297         if (!tg3_flag(tp, NVRAM))
3298                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3299
3300         offset = tg3_nvram_phys_addr(tp, offset);
3301
3302         if (offset > NVRAM_ADDR_MSK)
3303                 return -EINVAL;
3304
3305         ret = tg3_nvram_lock(tp);
3306         if (ret)
3307                 return ret;
3308
3309         tg3_enable_nvram_access(tp);
3310
3311         tw32(NVRAM_ADDR, offset);
3312         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3313                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3314
3315         if (ret == 0)
3316                 *val = tr32(NVRAM_RDDATA);
3317
3318         tg3_disable_nvram_access(tp);
3319
3320         tg3_nvram_unlock(tp);
3321
3322         return ret;
3323 }
3324
3325 /* Ensures NVRAM data is in bytestream format. */
3326 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3327 {
3328         u32 v;
3329         int res = tg3_nvram_read(tp, offset, &v);
3330         if (!res)
3331                 *val = cpu_to_be32(v);
3332         return res;
3333 }
3334
3335 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3336                                     u32 offset, u32 len, u8 *buf)
3337 {
3338         int i, j, rc = 0;
3339         u32 val;
3340
3341         for (i = 0; i < len; i += 4) {
3342                 u32 addr;
3343                 __be32 data;
3344
3345                 addr = offset + i;
3346
3347                 memcpy(&data, buf + i, 4);
3348
3349                 /*
3350                  * The SEEPROM interface expects the data to always be opposite
3351                  * the native endian format.  We accomplish this by reversing
3352                  * all the operations that would have been performed on the
3353                  * data from a call to tg3_nvram_read_be32().
3354                  */
3355                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3356
3357                 val = tr32(GRC_EEPROM_ADDR);
3358                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3359
3360                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3361                         EEPROM_ADDR_READ);
3362                 tw32(GRC_EEPROM_ADDR, val |
3363                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3364                         (addr & EEPROM_ADDR_ADDR_MASK) |
3365                         EEPROM_ADDR_START |
3366                         EEPROM_ADDR_WRITE);
3367
3368                 for (j = 0; j < 1000; j++) {
3369                         val = tr32(GRC_EEPROM_ADDR);
3370
3371                         if (val & EEPROM_ADDR_COMPLETE)
3372                                 break;
3373                         msleep(1);
3374                 }
3375                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3376                         rc = -EBUSY;
3377                         break;
3378                 }
3379         }
3380
3381         return rc;
3382 }
3383
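     /* A sketch of the flow implemented below: unbuffered flash parts
      * are updated with a read-modify-write cycle per page.  The whole
      * page is read into a bounce buffer, the caller's bytes are merged
      * in, then the part is write-enabled, the page erased, and the
      * buffer streamed back out one 32-bit word at a time.
      */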
3384 /* offset and length are dword aligned */
3385 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3386                 u8 *buf)
3387 {
3388         int ret = 0;
3389         u32 pagesize = tp->nvram_pagesize;
3390         u32 pagemask = pagesize - 1;
3391         u32 nvram_cmd;
3392         u8 *tmp;
3393
3394         tmp = kmalloc(pagesize, GFP_KERNEL);
3395         if (tmp == NULL)
3396                 return -ENOMEM;
3397
3398         while (len) {
3399                 int j;
3400                 u32 phy_addr, page_off, size;
3401
3402                 phy_addr = offset & ~pagemask;
3403
3404                 for (j = 0; j < pagesize; j += 4) {
3405                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3406                                                   (__be32 *) (tmp + j));
3407                         if (ret)
3408                                 break;
3409                 }
3410                 if (ret)
3411                         break;
3412
3413                 page_off = offset & pagemask;
3414                 size = pagesize;
3415                 if (len < size)
3416                         size = len;
3417
3418                 len -= size;
3419
3420                 memcpy(tmp + page_off, buf, size);
3421
3422                 offset = offset + (pagesize - page_off);
3423
3424                 tg3_enable_nvram_access(tp);
3425
3426                 /*
3427                  * Before we can erase the flash page, we need
3428                  * to issue a special "write enable" command.
3429                  */
3430                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3431
3432                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3433                         break;
3434
3435                 /* Erase the target page */
3436                 tw32(NVRAM_ADDR, phy_addr);
3437
3438                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3439                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3440
3441                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3442                         break;
3443
3444                 /* Issue another write enable to start the write. */
3445                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3446
3447                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3448                         break;
3449
3450                 for (j = 0; j < pagesize; j += 4) {
3451                         __be32 data;
3452
3453                         data = *((__be32 *) (tmp + j));
3454
3455                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3456
3457                         tw32(NVRAM_ADDR, phy_addr + j);
3458
3459                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3460                                 NVRAM_CMD_WR;
3461
3462                         if (j == 0)
3463                                 nvram_cmd |= NVRAM_CMD_FIRST;
3464                         else if (j == (pagesize - 4))
3465                                 nvram_cmd |= NVRAM_CMD_LAST;
3466
3467                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3468                         if (ret)
3469                                 break;
3470                 }
3471                 if (ret)
3472                         break;
3473         }
3474
3475         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3476         tg3_nvram_exec_cmd(tp, nvram_cmd);
3477
3478         kfree(tmp);
3479
3480         return ret;
3481 }
3482
3483 /* offset and length are dword aligned */
3484 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3485                 u8 *buf)
3486 {
3487         int i, ret = 0;
3488
3489         for (i = 0; i < len; i += 4, offset += 4) {
3490                 u32 page_off, phy_addr, nvram_cmd;
3491                 __be32 data;
3492
3493                 memcpy(&data, buf + i, 4);
3494                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3495
3496                 page_off = offset % tp->nvram_pagesize;
3497
3498                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3499
3500                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3501
3502                 if (page_off == 0 || i == 0)
3503                         nvram_cmd |= NVRAM_CMD_FIRST;
3504                 if (page_off == (tp->nvram_pagesize - 4))
3505                         nvram_cmd |= NVRAM_CMD_LAST;
3506
3507                 if (i == (len - 4))
3508                         nvram_cmd |= NVRAM_CMD_LAST;
3509
3510                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3511                     !tg3_flag(tp, FLASH) ||
3512                     !tg3_flag(tp, 57765_PLUS))
3513                         tw32(NVRAM_ADDR, phy_addr);
3514
3515                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3516                     !tg3_flag(tp, 5755_PLUS) &&
3517                     (tp->nvram_jedecnum == JEDEC_ST) &&
3518                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3519                         u32 cmd;
3520
3521                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3522                         ret = tg3_nvram_exec_cmd(tp, cmd);
3523                         if (ret)
3524                                 break;
3525                 }
3526                 if (!tg3_flag(tp, FLASH)) {
3527                         /* We always do complete word writes to eeprom. */
3528                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3529                 }
3530
3531                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3532                 if (ret)
3533                         break;
3534         }
3535         return ret;
3536 }
3537
3538 /* offset and length are dword aligned */
3539 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3540 {
3541         int ret;
3542
3543         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3544                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3545                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3546                 udelay(40);
3547         }
3548
3549         if (!tg3_flag(tp, NVRAM)) {
3550                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3551         } else {
3552                 u32 grc_mode;
3553
3554                 ret = tg3_nvram_lock(tp);
3555                 if (ret)
3556                         return ret;
3557
3558                 tg3_enable_nvram_access(tp);
3559                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3560                         tw32(NVRAM_WRITE1, 0x406);
3561
3562                 grc_mode = tr32(GRC_MODE);
3563                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3564
3565                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3566                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3567                                 buf);
3568                 } else {
3569                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3570                                 buf);
3571                 }
3572
3573                 grc_mode = tr32(GRC_MODE);
3574                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3575
3576                 tg3_disable_nvram_access(tp);
3577                 tg3_nvram_unlock(tp);
3578         }
3579
3580         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3581                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3582                 udelay(40);
3583         }
3584
3585         return ret;
3586 }
3587
3588 #define RX_CPU_SCRATCH_BASE     0x30000
3589 #define RX_CPU_SCRATCH_SIZE     0x04000
3590 #define TX_CPU_SCRATCH_BASE     0x34000
3591 #define TX_CPU_SCRATCH_SIZE     0x04000
3592
3593 /* tp->lock is held. */
3594 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3595 {
3596         int i;
3597         const int iters = 10000;
3598
3599         for (i = 0; i < iters; i++) {
3600                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3601                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3602                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3603                         break;
3604                 if (pci_channel_offline(tp->pdev))
3605                         return -EBUSY;
3606         }
3607
3608         return (i == iters) ? -EBUSY : 0;
3609 }
3610
3611 /* tp->lock is held. */
3612 static int tg3_rxcpu_pause(struct tg3 *tp)
3613 {
3614         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3615
3616         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3617         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3618         udelay(10);
3619
3620         return rc;
3621 }
3622
3623 /* tp->lock is held. */
3624 static int tg3_txcpu_pause(struct tg3 *tp)
3625 {
3626         return tg3_pause_cpu(tp, TX_CPU_BASE);
3627 }
3628
3629 /* tp->lock is held. */
3630 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3631 {
3632         tw32(cpu_base + CPU_STATE, 0xffffffff);
3633         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3634 }
3635
3636 /* tp->lock is held. */
3637 static void tg3_rxcpu_resume(struct tg3 *tp)
3638 {
3639         tg3_resume_cpu(tp, RX_CPU_BASE);
3640 }
3641
3642 /* tp->lock is held. */
3643 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3644 {
3645         int rc;
3646
3647         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3648
3649         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3650                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3651
3652                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3653                 return 0;
3654         }
3655         if (cpu_base == RX_CPU_BASE) {
3656                 rc = tg3_rxcpu_pause(tp);
3657         } else {
3658                 /*
3659                  * The 5750 derivative in the BCM4785 only has an RX CPU;
3660                  * there is no TX CPU to pause.
3661                  */
3662                 if (tg3_flag(tp, IS_SSB_CORE))
3663                         return 0;
3664
3665                 rc = tg3_txcpu_pause(tp);
3666         }
3667
3668         if (rc) {
3669                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3670                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3671                 return -ENODEV;
3672         }
3673
3674         /* Clear firmware's nvram arbitration. */
3675         if (tg3_flag(tp, NVRAM))
3676                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3677         return 0;
3678 }
3679
3680 static int tg3_fw_data_len(struct tg3 *tp,
3681                            const struct tg3_firmware_hdr *fw_hdr)
3682 {
3683         int fw_len;
3684
3685         /* Non-fragmented firmware has one firmware header followed by a
3686          * contiguous chunk of data to be written. The length field in that
3687          * header is not the length of the data to be written but the
3688          * complete length of the bss. The data length is determined from
3689          * tp->fw->size minus the headers.
3690          *
3691          * Fragmented firmware has a main header followed by multiple
3692          * fragments. Each fragment is identical to non-fragmented firmware,
3693          * with a firmware header followed by a contiguous chunk of data. In
3694          * the main header, the length field is unused and set to 0xffffffff.
3695          * In each fragment header the length is the entire size of that
3696          * fragment, i.e. fragment data plus header length. The data length
3697          * is therefore the length field in the header minus TG3_FW_HDR_LEN.
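              *
              * Worked example (illustrative numbers): a fragment header
              * whose length field reads 0x4c carries 0x4c - TG3_FW_HDR_LEN
              * bytes of payload, which this helper returns as a count of
              * u32 words.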
3698          */
3699         if (tp->fw_len == 0xffffffff)
3700                 fw_len = be32_to_cpu(fw_hdr->len);
3701         else
3702                 fw_len = tp->fw->size;
3703
3704         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3705 }
3706
3707 /* tp->lock is held. */
3708 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3709                                  u32 cpu_scratch_base, int cpu_scratch_size,
3710                                  const struct tg3_firmware_hdr *fw_hdr)
3711 {
3712         int err, i;
3713         void (*write_op)(struct tg3 *, u32, u32);
3714         int total_len = tp->fw->size;
3715
3716         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3717                 netdev_err(tp->dev,
3718                            "%s: Trying to load TX cpu firmware, but 5705+ chips have no TX cpu\n",
3719                            __func__);
3720                 return -EINVAL;
3721         }
3722
3723         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3724                 write_op = tg3_write_mem;
3725         else
3726                 write_op = tg3_write_indirect_reg32;
3727
3728         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3729                 /* It is possible that bootcode is still loading at this point.
3730                  * Take the nvram lock before halting the cpu.
3731                  */
3732                 int lock_err = tg3_nvram_lock(tp);
3733                 err = tg3_halt_cpu(tp, cpu_base);
3734                 if (!lock_err)
3735                         tg3_nvram_unlock(tp);
3736                 if (err)
3737                         goto out;
3738
3739                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3740                         write_op(tp, cpu_scratch_base + i, 0);
3741                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3742                 tw32(cpu_base + CPU_MODE,
3743                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3744         } else {
3745                 /* Subtract the fragmented firmware's main header from the
3746                  * total length and advance to the first fragment.
3747                  */
3748                 total_len -= TG3_FW_HDR_LEN;
3749                 fw_hdr++;
3750         }
3751
3752         do {
3753                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3754                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3755                         write_op(tp, cpu_scratch_base +
3756                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3757                                      (i * sizeof(u32)),
3758                                  be32_to_cpu(fw_data[i]));
3759
3760                 total_len -= be32_to_cpu(fw_hdr->len);
3761
3762                 /* Advance to next fragment */
3763                 fw_hdr = (struct tg3_firmware_hdr *)
3764                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3765         } while (total_len > 0);
3766
3767         err = 0;
3768
3769 out:
3770         return err;
3771 }
3772
3773 /* tp->lock is held. */
3774 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3775 {
3776         int i;
3777         const int iters = 5;
3778
3779         tw32(cpu_base + CPU_STATE, 0xffffffff);
3780         tw32_f(cpu_base + CPU_PC, pc);
3781
3782         for (i = 0; i < iters; i++) {
3783                 if (tr32(cpu_base + CPU_PC) == pc)
3784                         break;
3785                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3786                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3787                 tw32_f(cpu_base + CPU_PC, pc);
3788                 udelay(1000);
3789         }
3790
3791         return (i == iters) ? -EBUSY : 0;
3792 }
3793
3794 /* tp->lock is held. */
3795 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3796 {
3797         const struct tg3_firmware_hdr *fw_hdr;
3798         int err;
3799
3800         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3801
3802         /* Firmware blob starts with version numbers, followed by
3803          * start address and length. We are setting complete length.
3804          * length = end_address_of_bss - start_address_of_text.
3805          * Remainder is the blob to be loaded contiguously
3806          * from start address. */
3807
3808         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3809                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3810                                     fw_hdr);
3811         if (err)
3812                 return err;
3813
3814         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3815                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3816                                     fw_hdr);
3817         if (err)
3818                 return err;
3819
3820         /* Now startup only the RX cpu. */
3821         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3822                                        be32_to_cpu(fw_hdr->base_addr));
3823         if (err) {
3824                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x, "
3825                            "should be %08x\n", __func__,
3826                            tr32(RX_CPU_BASE + CPU_PC),
3827                            be32_to_cpu(fw_hdr->base_addr));
3828                 return -ENODEV;
3829         }
3830
3831         tg3_rxcpu_resume(tp);
3832
3833         return 0;
3834 }
3835
3836 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3837 {
3838         const int iters = 1000;
3839         int i;
3840         u32 val;
3841
3842         /* Wait for the boot code to complete initialization and enter its
3843          * service loop; it is then safe to download service patches.
3844          */
3845         for (i = 0; i < iters; i++) {
3846                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3847                         break;
3848
3849                 udelay(10);
3850         }
3851
3852         if (i == iters) {
3853                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3854                 return -EBUSY;
3855         }
3856
3857         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3858         if (val & 0xff) {
3859                 netdev_warn(tp->dev,
3860                             "Other patches exist. Not downloading EEE patch\n");
3861                 return -EEXIST;
3862         }
3863
3864         return 0;
3865 }
3866
3867 /* tp->lock is held. */
3868 static void tg3_load_57766_firmware(struct tg3 *tp)
3869 {
3870         struct tg3_firmware_hdr *fw_hdr;
3871
3872         if (!tg3_flag(tp, NO_NVRAM))
3873                 return;
3874
3875         if (tg3_validate_rxcpu_state(tp))
3876                 return;
3877
3878         if (!tp->fw)
3879                 return;
3880
3881         /* This firmware blob has a different format than older firmware
3882          * releases, as described below. The main difference is we have fragmented
3883          * data to be written to non-contiguous locations.
3884          *
3885          * In the beginning we have a firmware header identical to other
3886          * firmware which consists of version, base addr and length. The length
3887          * here is unused and set to 0xffffffff.
3888          *
3889          * This is followed by a series of firmware fragments which are
3890          * individually identical to previous firmware, i.e. they have a
3891          * firmware header followed by the data for that fragment. The version
3892          * field of the individual fragment header is unused.
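              *
              * Rough layout sketch (fragment count and sizes illustrative):
              *
              *   main header:       version | base_addr | len == 0xffffffff
              *   fragment 1 header: version (unused) | base_addr | len
              *   fragment 1 data:   len - TG3_FW_HDR_LEN bytes
              *   fragment 2 header: ...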
3893          */
3894
3895         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3896         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3897                 return;
3898
3899         if (tg3_rxcpu_pause(tp))
3900                 return;
3901
3902         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3903         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3904
3905         tg3_rxcpu_resume(tp);
3906 }
3907
3908 /* tp->lock is held. */
3909 static int tg3_load_tso_firmware(struct tg3 *tp)
3910 {
3911         const struct tg3_firmware_hdr *fw_hdr;
3912         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3913         int err;
3914
3915         if (!tg3_flag(tp, FW_TSO))
3916                 return 0;
3917
3918         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3919
3920         /* Firmware blob starts with version numbers, followed by
3921          * start address and length. We are setting complete length.
3922          * length = end_address_of_bss - start_address_of_text.
3923          * Remainder is the blob to be loaded contiguously
3924          * from start address. */
3925
3926         cpu_scratch_size = tp->fw_len;
3927
3928         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3929                 cpu_base = RX_CPU_BASE;
3930                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3931         } else {
3932                 cpu_base = TX_CPU_BASE;
3933                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3934                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3935         }
3936
3937         err = tg3_load_firmware_cpu(tp, cpu_base,
3938                                     cpu_scratch_base, cpu_scratch_size,
3939                                     fw_hdr);
3940         if (err)
3941                 return err;
3942
3943         /* Now startup the cpu. */
3944         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3945                                        be32_to_cpu(fw_hdr->base_addr));
3946         if (err) {
3947                 netdev_err(tp->dev,
3948                            "%s fails to set CPU PC, is %08x, should be %08x\n",
3949                            __func__, tr32(cpu_base + CPU_PC),
3950                            be32_to_cpu(fw_hdr->base_addr));
3951                 return -ENODEV;
3952         }
3953
3954         tg3_resume_cpu(tp, cpu_base);
3955         return 0;
3956 }
3957
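     /* Pack one MAC address into the high/low register pair the MAC
      * block expects: octets 0-1 in the high register, octets 2-5 in
      * the low register.  Illustrative example: 00:10:18:aa:bb:cc is
      * written as addr_high == 0x00000010 and addr_low == 0x18aabbcc.
      */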
3958 /* tp->lock is held. */
3959 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3960 {
3961         u32 addr_high, addr_low;
3962
3963         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3964         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3965                     (mac_addr[4] <<  8) | mac_addr[5]);
3966
3967         if (index < 4) {
3968                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3969                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3970         } else {
3971                 index -= 4;
3972                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3973                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3974         }
3975 }
3976
3977 /* tp->lock is held. */
3978 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3979 {
3980         u32 addr_high;
3981         int i;
3982
3983         for (i = 0; i < 4; i++) {
3984                 if (i == 1 && skip_mac_1)
3985                         continue;
3986                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3987         }
3988
3989         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3990             tg3_asic_rev(tp) == ASIC_REV_5704) {
3991                 for (i = 4; i < 16; i++)
3992                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3993         }
3994
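             /* Seed the transmit backoff generator from the MAC address
              * (the six octets summed, masked to TX_BACKOFF_SEED_MASK),
              * so different stations are likely to pick different
              * backoff slots.
              */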
3995         addr_high = (tp->dev->dev_addr[0] +
3996                      tp->dev->dev_addr[1] +
3997                      tp->dev->dev_addr[2] +
3998                      tp->dev->dev_addr[3] +
3999                      tp->dev->dev_addr[4] +
4000                      tp->dev->dev_addr[5]) &
4001                 TX_BACKOFF_SEED_MASK;
4002         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4003 }
4004
4005 static void tg3_enable_register_access(struct tg3 *tp)
4006 {
4007         /*
4008          * Make sure register accesses (indirect or otherwise) will function
4009          * correctly.
4010          */
4011         pci_write_config_dword(tp->pdev,
4012                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4013 }
4014
4015 static int tg3_power_up(struct tg3 *tp)
4016 {
4017         int err;
4018
4019         tg3_enable_register_access(tp);
4020
4021         err = pci_set_power_state(tp->pdev, PCI_D0);
4022         if (!err) {
4023                 /* Switch out of Vaux if it is a NIC */
4024                 tg3_pwrsrc_switch_to_vmain(tp);
4025         } else {
4026                 netdev_err(tp->dev, "Transition to D0 failed\n");
4027         }
4028
4029         return err;
4030 }
4031
4032 static int tg3_setup_phy(struct tg3 *, bool);
4033
4034 static int tg3_power_down_prepare(struct tg3 *tp)
4035 {
4036         u32 misc_host_ctrl;
4037         bool device_should_wake, do_low_power;
4038
4039         tg3_enable_register_access(tp);
4040
4041         /* Restore the CLKREQ setting. */
4042         if (tg3_flag(tp, CLKREQ_BUG))
4043                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4044                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4045
4046         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4047         tw32(TG3PCI_MISC_HOST_CTRL,
4048              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4049
4050         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4051                              tg3_flag(tp, WOL_ENABLE);
4052
4053         if (tg3_flag(tp, USE_PHYLIB)) {
4054                 do_low_power = false;
4055                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4056                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4057                         struct phy_device *phydev;
4058                         u32 phyid, advertising;
4059
4060                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4061
4062                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4063
4064                         tp->link_config.speed = phydev->speed;
4065                         tp->link_config.duplex = phydev->duplex;
4066                         tp->link_config.autoneg = phydev->autoneg;
4067                         tp->link_config.advertising = phydev->advertising;
4068
4069                         advertising = ADVERTISED_TP |
4070                                       ADVERTISED_Pause |
4071                                       ADVERTISED_Autoneg |
4072                                       ADVERTISED_10baseT_Half;
4073
4074                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4075                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4076                                         advertising |=
4077                                                 ADVERTISED_100baseT_Half |
4078                                                 ADVERTISED_100baseT_Full |
4079                                                 ADVERTISED_10baseT_Full;
4080                                 else
4081                                         advertising |= ADVERTISED_10baseT_Full;
4082                         }
4083
4084                         phydev->advertising = advertising;
4085
4086                         phy_start_aneg(phydev);
4087
4088                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4089                         if (phyid != PHY_ID_BCMAC131) {
4090                                 phyid &= PHY_BCM_OUI_MASK;
4091                                 if (phyid == PHY_BCM_OUI_1 ||
4092                                     phyid == PHY_BCM_OUI_2 ||
4093                                     phyid == PHY_BCM_OUI_3)
4094                                         do_low_power = true;
4095                         }
4096                 }
4097         } else {
4098                 do_low_power = true;
4099
4100                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4101                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4102
4103                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4104                         tg3_setup_phy(tp, false);
4105         }
4106
4107         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4108                 u32 val;
4109
4110                 val = tr32(GRC_VCPU_EXT_CTRL);
4111                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4112         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4113                 int i;
4114                 u32 val;
4115
4116                 for (i = 0; i < 200; i++) {
4117                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4118                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4119                                 break;
4120                         msleep(1);
4121                 }
4122         }
4123         if (tg3_flag(tp, WOL_CAP))
4124                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4125                                                      WOL_DRV_STATE_SHUTDOWN |
4126                                                      WOL_DRV_WOL |
4127                                                      WOL_SET_MAGIC_PKT);
4128
4129         if (device_should_wake) {
4130                 u32 mac_mode;
4131
4132                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4133                         if (do_low_power &&
4134                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4135                                 tg3_phy_auxctl_write(tp,
4136                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4137                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4138                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4139                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4140                                 udelay(40);
4141                         }
4142
4143                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4144                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4145                         else if (tp->phy_flags &
4146                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4147                                 if (tp->link_config.active_speed == SPEED_1000)
4148                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4149                                 else
4150                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4151                         } else
4152                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4153
4154                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4155                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4156                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4157                                              SPEED_100 : SPEED_10;
4158                                 if (tg3_5700_link_polarity(tp, speed))
4159                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4160                                 else
4161                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4162                         }
4163                 } else {
4164                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4165                 }
4166
4167                 if (!tg3_flag(tp, 5750_PLUS))
4168                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4169
4170                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4171                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4172                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4173                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4174
4175                 if (tg3_flag(tp, ENABLE_APE))
4176                         mac_mode |= MAC_MODE_APE_TX_EN |
4177                                     MAC_MODE_APE_RX_EN |
4178                                     MAC_MODE_TDE_ENABLE;
4179
4180                 tw32_f(MAC_MODE, mac_mode);
4181                 udelay(100);
4182
4183                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4184                 udelay(10);
4185         }
4186
4187         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4188             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4189              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4190                 u32 base_val;
4191
4192                 base_val = tp->pci_clock_ctrl;
4193                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4194                              CLOCK_CTRL_TXCLK_DISABLE);
4195
4196                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4197                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4198         } else if (tg3_flag(tp, 5780_CLASS) ||
4199                    tg3_flag(tp, CPMU_PRESENT) ||
4200                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4201                 /* do nothing */
4202         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4203                 u32 newbits1, newbits2;
4204
4205                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4206                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4207                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4208                                     CLOCK_CTRL_TXCLK_DISABLE |
4209                                     CLOCK_CTRL_ALTCLK);
4210                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4211                 } else if (tg3_flag(tp, 5705_PLUS)) {
4212                         newbits1 = CLOCK_CTRL_625_CORE;
4213                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4214                 } else {
4215                         newbits1 = CLOCK_CTRL_ALTCLK;
4216                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4217                 }
4218
4219                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4220                             40);
4221
4222                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4223                             40);
4224
4225                 if (!tg3_flag(tp, 5705_PLUS)) {
4226                         u32 newbits3;
4227
4228                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4229                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4230                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4231                                             CLOCK_CTRL_TXCLK_DISABLE |
4232                                             CLOCK_CTRL_44MHZ_CORE);
4233                         } else {
4234                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4235                         }
4236
4237                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4238                                     tp->pci_clock_ctrl | newbits3, 40);
4239                 }
4240         }
4241
4242         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4243                 tg3_power_down_phy(tp, do_low_power);
4244
4245         tg3_frob_aux_power(tp, true);
4246
4247         /* Workaround for unstable PLL clock */
4248         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4249             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4250              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4251                 u32 val = tr32(0x7d00);
4252
4253                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4254                 tw32(0x7d00, val);
4255                 if (!tg3_flag(tp, ENABLE_ASF)) {
4256                         int err;
4257
4258                         err = tg3_nvram_lock(tp);
4259                         tg3_halt_cpu(tp, RX_CPU_BASE);
4260                         if (!err)
4261                                 tg3_nvram_unlock(tp);
4262                 }
4263         }
4264
4265         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4266
4267         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4268
4269         return 0;
4270 }
4271
4272 static void tg3_power_down(struct tg3 *tp)
4273 {
4274         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4275         pci_set_power_state(tp->pdev, PCI_D3hot);
4276 }
4277
4278 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4279 {
4280         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4281         case MII_TG3_AUX_STAT_10HALF:
4282                 *speed = SPEED_10;
4283                 *duplex = DUPLEX_HALF;
4284                 break;
4285
4286         case MII_TG3_AUX_STAT_10FULL:
4287                 *speed = SPEED_10;
4288                 *duplex = DUPLEX_FULL;
4289                 break;
4290
4291         case MII_TG3_AUX_STAT_100HALF:
4292                 *speed = SPEED_100;
4293                 *duplex = DUPLEX_HALF;
4294                 break;
4295
4296         case MII_TG3_AUX_STAT_100FULL:
4297                 *speed = SPEED_100;
4298                 *duplex = DUPLEX_FULL;
4299                 break;
4300
4301         case MII_TG3_AUX_STAT_1000HALF:
4302                 *speed = SPEED_1000;
4303                 *duplex = DUPLEX_HALF;
4304                 break;
4305
4306         case MII_TG3_AUX_STAT_1000FULL:
4307                 *speed = SPEED_1000;
4308                 *duplex = DUPLEX_FULL;
4309                 break;
4310
4311         default:
4312                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4313                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4314                                  SPEED_10;
4315                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4316                                   DUPLEX_HALF;
4317                         break;
4318                 }
4319                 *speed = SPEED_UNKNOWN;
4320                 *duplex = DUPLEX_UNKNOWN;
4321                 break;
4322         }
4323 }
4324
4325 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4326 {
4327         int err = 0;
4328         u32 val, new_adv;
4329
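             /* Illustrative example: advertising 100baseT_Full with
              * symmetric flow control yields ADVERTISE_CSMA |
              * ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP below.
              */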
4330         new_adv = ADVERTISE_CSMA;
4331         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4332         new_adv |= mii_advertise_flowctrl(flowctrl);
4333
4334         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4335         if (err)
4336                 goto done;
4337
4338         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4339                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4340
4341                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4342                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4343                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4344
4345                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4346                 if (err)
4347                         goto done;
4348         }
4349
4350         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4351                 goto done;
4352
4353         tw32(TG3_CPMU_EEE_MODE,
4354              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4355
4356         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4357         if (!err) {
4358                 u32 err2;
4359
4360                 val = 0;
4361                 /* Advertise 100-BaseTX EEE ability */
4362                 if (advertise & ADVERTISED_100baseT_Full)
4363                         val |= MDIO_AN_EEE_ADV_100TX;
4364                 /* Advertise 1000-BaseT EEE ability */
4365                 if (advertise & ADVERTISED_1000baseT_Full)
4366                         val |= MDIO_AN_EEE_ADV_1000T;
4367
4368                 if (!tp->eee.eee_enabled) {
4369                         val = 0;
4370                         tp->eee.advertised = 0;
4371                 } else {
4372                         tp->eee.advertised = advertise &
4373                                              (ADVERTISED_100baseT_Full |
4374                                               ADVERTISED_1000baseT_Full);
4375                 }
4376
4377                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4378                 if (err)
4379                         val = 0;
4380
4381                 switch (tg3_asic_rev(tp)) {
4382                 case ASIC_REV_5717:
4383                 case ASIC_REV_57765:
4384                 case ASIC_REV_57766:
4385                 case ASIC_REV_5719:
4386                         /* If we advertised any EEE abilities above... */
4387                         if (val)
4388                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4389                                       MII_TG3_DSP_TAP26_RMRXSTO |
4390                                       MII_TG3_DSP_TAP26_OPCSINPT;
4391                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4392                         /* Fall through */
4393                 case ASIC_REV_5720:
4394                 case ASIC_REV_5762:
4395                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4396                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4397                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4398                 }
4399
4400                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4401                 if (!err)
4402                         err = err2;
4403         }
4404
4405 done:
4406         return err;
4407 }
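
/* Sketch of the MII_ADVERTISE value composed above for one common
 * request, kept out of the build with #if 0.  The helper name
 * tg3_example_advertise() is made up; the mii.h helpers are real.
 * Note that FLOW_CTRL_TX | FLOW_CTRL_RX maps to ADVERTISE_PAUSE_CAP
 * alone via mii_advertise_flowctrl().
 */
#if 0
static int tg3_example_advertise(struct tg3 *tp)
{
        u32 adv = ADVERTISE_CSMA;

        /* 100baseT-Full -> ADVERTISE_100FULL */
        adv |= ethtool_adv_to_mii_adv_t(ADVERTISED_100baseT_Full) &
               ADVERTISE_ALL;
        /* symmetric pause -> ADVERTISE_PAUSE_CAP */
        adv |= mii_advertise_flowctrl(FLOW_CTRL_TX | FLOW_CTRL_RX);

        return tg3_writephy(tp, MII_ADVERTISE, adv);
}
#endif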
4408
4409 static void tg3_phy_copper_begin(struct tg3 *tp)
4410 {
4411         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4412             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4413                 u32 adv, fc;
4414
4415                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4416                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4417                         adv = ADVERTISED_10baseT_Half |
4418                               ADVERTISED_10baseT_Full;
4419                         if (tg3_flag(tp, WOL_SPEED_100MB))
4420                                 adv |= ADVERTISED_100baseT_Half |
4421                                        ADVERTISED_100baseT_Full;
4422                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4423                                 if (!(tp->phy_flags &
4424                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4425                                         adv |= ADVERTISED_1000baseT_Half;
4426                                 adv |= ADVERTISED_1000baseT_Full;
4427                         }
4428
4429                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4430                 } else {
4431                         adv = tp->link_config.advertising;
4432                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4433                                 adv &= ~(ADVERTISED_1000baseT_Half |
4434                                          ADVERTISED_1000baseT_Full);
4435
4436                         fc = tp->link_config.flowctrl;
4437                 }
4438
4439                 tg3_phy_autoneg_cfg(tp, adv, fc);
4440
4441                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4442                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4443                         /* Normally during power down we would autonegotiate
4444                          * the lowest possible speed for WOL, but to avoid
4445                          * link flap we leave the advertisement untouched.
4446                          */
4447                         return;
4448                 }
4449
4450                 tg3_writephy(tp, MII_BMCR,
4451                              BMCR_ANENABLE | BMCR_ANRESTART);
4452         } else {
4453                 int i;
4454                 u32 bmcr, orig_bmcr;
4455
4456                 tp->link_config.active_speed = tp->link_config.speed;
4457                 tp->link_config.active_duplex = tp->link_config.duplex;
4458
4459                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4460                         /* With autoneg disabled, the 5714/5715 family only
4461                          * links up when the advertisement register has the
4462                          * configured speed enabled.
4463                          */
4464                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4465                 }
4466
4467                 bmcr = 0;
4468                 switch (tp->link_config.speed) {
4469                 default:
4470                 case SPEED_10:
4471                         break;
4472
4473                 case SPEED_100:
4474                         bmcr |= BMCR_SPEED100;
4475                         break;
4476
4477                 case SPEED_1000:
4478                         bmcr |= BMCR_SPEED1000;
4479                         break;
4480                 }
4481
4482                 if (tp->link_config.duplex == DUPLEX_FULL)
4483                         bmcr |= BMCR_FULLDPLX;
4484
4485                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4486                     (bmcr != orig_bmcr)) {
4487                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4488                         for (i = 0; i < 1500; i++) {
4489                                 u32 tmp;
4490
4491                                 udelay(10);
4492                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4493                                     tg3_readphy(tp, MII_BMSR, &tmp))
4494                                         continue;
4495                                 if (!(tmp & BMSR_LSTATUS)) {
4496                                         udelay(40);
4497                                         break;
4498                                 }
4499                         }
4500                         tg3_writephy(tp, MII_BMCR, bmcr);
4501                         udelay(40);
4502                 }
4503         }
4504 }
4505
4506 static int tg3_phy_pull_config(struct tg3 *tp)
4507 {
4508         int err;
4509         u32 val;
4510
4511         err = tg3_readphy(tp, MII_BMCR, &val);
4512         if (err)
4513                 goto done;
4514
4515         if (!(val & BMCR_ANENABLE)) {
4516                 tp->link_config.autoneg = AUTONEG_DISABLE;
4517                 tp->link_config.advertising = 0;
4518                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4519
4520                 err = -EIO;
4521
4522                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4523                 case 0:
4524                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4525                                 goto done;
4526
4527                         tp->link_config.speed = SPEED_10;
4528                         break;
4529                 case BMCR_SPEED100:
4530                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4531                                 goto done;
4532
4533                         tp->link_config.speed = SPEED_100;
4534                         break;
4535                 case BMCR_SPEED1000:
4536                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4537                                 tp->link_config.speed = SPEED_1000;
4538                                 break;
4539                         }
4540                         /* Fall through */
4541                 default:
4542                         goto done;
4543                 }
4544
4545                 if (val & BMCR_FULLDPLX)
4546                         tp->link_config.duplex = DUPLEX_FULL;
4547                 else
4548                         tp->link_config.duplex = DUPLEX_HALF;
4549
4550                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4551
4552                 err = 0;
4553                 goto done;
4554         }
4555
4556         tp->link_config.autoneg = AUTONEG_ENABLE;
4557         tp->link_config.advertising = ADVERTISED_Autoneg;
4558         tg3_flag_set(tp, PAUSE_AUTONEG);
4559
4560         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4561                 u32 adv;
4562
4563                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4564                 if (err)
4565                         goto done;
4566
4567                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4568                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4569
4570                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4571         } else {
4572                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4573         }
4574
4575         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4576                 u32 adv;
4577
4578                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4579                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4580                         if (err)
4581                                 goto done;
4582
4583                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4584                 } else {
4585                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4586                         if (err)
4587                                 goto done;
4588
4589                         adv = tg3_decode_flowctrl_1000X(val);
4590                         tp->link_config.flowctrl = adv;
4591
4592                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4593                         adv = mii_adv_to_ethtool_adv_x(val);
4594                 }
4595
4596                 tp->link_config.advertising |= adv;
4597         }
4598
4599 done:
4600         return err;
4601 }
4602
4603 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4604 {
4605         int err;
4606
4607         /* Turn off tap power management and set the
4608          * extended packet length bit. */
4609         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4610
4611         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4612         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4613         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4614         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4615         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4616
4617         udelay(40);
4618
4619         return err;
4620 }
4621
4622 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4623 {
4624         struct ethtool_eee eee;
4625
4626         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4627                 return true;
4628
4629         tg3_eee_pull_config(tp, &eee);
4630
4631         if (tp->eee.eee_enabled) {
4632                 if (tp->eee.advertised != eee.advertised ||
4633                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4634                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4635                         return false;
4636         } else {
4637                 /* EEE is disabled but we're advertising */
4638                 if (eee.advertised)
4639                         return false;
4640         }
4641
4642         return true;
4643 }
4644
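/* Verify that the PHY's advertisement registers still match what
 * tg3_phy_autoneg_cfg() programmed: MII_ADVERTISE, and on gigabit
 * capable parts MII_CTRL1000 (including the 5701 A0/B0 forced-master
 * quirk).  The caller treats a mismatch as a dead link and reprograms
 * the PHY.
 */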
4645 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4646 {
4647         u32 advmsk, tgtadv, advertising;
4648
4649         advertising = tp->link_config.advertising;
4650         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4651
4652         advmsk = ADVERTISE_ALL;
4653         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4654                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4655                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4656         }
4657
4658         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4659                 return false;
4660
4661         if ((*lcladv & advmsk) != tgtadv)
4662                 return false;
4663
4664         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4665                 u32 tg3_ctrl;
4666
4667                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4668
4669                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4670                         return false;
4671
4672                 if (tgtadv &&
4673                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4674                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4675                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4676                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4677                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4678                 } else {
4679                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4680                 }
4681
4682                 if (tg3_ctrl != tgtadv)
4683                         return false;
4684         }
4685
4686         return true;
4687 }
4688
4689 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4690 {
4691         u32 lpeth = 0;
4692
4693         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4694                 u32 val;
4695
4696                 if (tg3_readphy(tp, MII_STAT1000, &val))
4697                         return false;
4698
4699                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4700         }
4701
4702         if (tg3_readphy(tp, MII_LPA, rmtadv))
4703                 return false;
4704
4705         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4706         tp->link_config.rmt_adv = lpeth;
4707
4708         return true;
4709 }
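
/* Sketch of how the two halves of the partner's advertisement combine
 * in the helper above, kept out of the build with #if 0.  The helper
 * name tg3_example_lpa() and the register values are made up; the
 * mii.h conversion helpers are real.
 */
#if 0
static u32 tg3_example_lpa(void)
{
        u32 stat1000 = LPA_1000FULL;            /* partner: 1000/Full */
        u32 lpa = LPA_100FULL | LPA_PAUSE_CAP;  /* partner: 100/Full + pause */

        /* Result: ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full |
         *         ADVERTISED_Pause
         */
        return mii_stat1000_to_ethtool_lpa_t(stat1000) |
               mii_lpa_to_ethtool_lpa_t(lpa);
}
#endif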
4710
4711 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4712 {
4713         if (curr_link_up != tp->link_up) {
4714                 if (curr_link_up) {
4715                         netif_carrier_on(tp->dev);
4716                 } else {
4717                         netif_carrier_off(tp->dev);
4718                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4719                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4720                 }
4721
4722                 tg3_link_report(tp);
4723                 return true;
4724         }
4725
4726         return false;
4727 }
4728
4729 static void tg3_clear_mac_status(struct tg3 *tp)
4730 {
4731         tw32(MAC_EVENT, 0);
4732
4733         tw32_f(MAC_STATUS,
4734                MAC_STATUS_SYNC_CHANGED |
4735                MAC_STATUS_CFG_CHANGED |
4736                MAC_STATUS_MI_COMPLETION |
4737                MAC_STATUS_LNKSTATE_CHANGED);
4738         udelay(40);
4739 }
4740
4741 static void tg3_setup_eee(struct tg3 *tp)
4742 {
4743         u32 val;
4744
4745         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4746               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4747         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4748                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4749
4750         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4751
4752         tw32_f(TG3_CPMU_EEE_CTRL,
4753                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4754
4755         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4756               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4757               TG3_CPMU_EEEMD_LPI_IN_RX |
4758               TG3_CPMU_EEEMD_EEE_ENABLE;
4759
4760         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4761                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4762
4763         if (tg3_flag(tp, ENABLE_APE))
4764                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4765
4766         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4767
4768         tw32_f(TG3_CPMU_EEE_DBTMR1,
4769                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4770                (tp->eee.tx_lpi_timer & 0xffff));
4771
4772         tw32_f(TG3_CPMU_EEE_DBTMR2,
4773                TG3_CPMU_DBTMR2_APE_TX_2047US |
4774                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4775 }
4776
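/* Main link bring-up/re-check path for copper PHYs: clear stale MAC
 * status, apply chip-specific PHY workarounds, poll BMSR and the aux
 * status register for the negotiated speed/duplex, validate the
 * advertisement and EEE configuration, then program MAC_MODE (port
 * mode, duplex, link polarity) to match the resulting link.
 */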
4777 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4778 {
4779         bool current_link_up;
4780         u32 bmsr, val;
4781         u32 lcl_adv, rmt_adv;
4782         u16 current_speed;
4783         u8 current_duplex;
4784         int i, err;
4785
4786         tg3_clear_mac_status(tp);
4787
4788         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4789                 tw32_f(MAC_MI_MODE,
4790                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4791                 udelay(80);
4792         }
4793
4794         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4795
4796         /* Some third-party PHYs need to be reset on link going
4797          * down.
4798          */
4799         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4800              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4801              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4802             tp->link_up) {
4803                 tg3_readphy(tp, MII_BMSR, &bmsr);
4804                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4805                     !(bmsr & BMSR_LSTATUS))
4806                         force_reset = true;
4807         }
4808         if (force_reset)
4809                 tg3_phy_reset(tp);
4810
4811         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4812                 tg3_readphy(tp, MII_BMSR, &bmsr);
4813                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4814                     !tg3_flag(tp, INIT_COMPLETE))
4815                         bmsr = 0;
4816
4817                 if (!(bmsr & BMSR_LSTATUS)) {
4818                         err = tg3_init_5401phy_dsp(tp);
4819                         if (err)
4820                                 return err;
4821
4822                         tg3_readphy(tp, MII_BMSR, &bmsr);
4823                         for (i = 0; i < 1000; i++) {
4824                                 udelay(10);
4825                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4826                                     (bmsr & BMSR_LSTATUS)) {
4827                                         udelay(40);
4828                                         break;
4829                                 }
4830                         }
4831
4832                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4833                             TG3_PHY_REV_BCM5401_B0 &&
4834                             !(bmsr & BMSR_LSTATUS) &&
4835                             tp->link_config.active_speed == SPEED_1000) {
4836                                 err = tg3_phy_reset(tp);
4837                                 if (!err)
4838                                         err = tg3_init_5401phy_dsp(tp);
4839                                 if (err)
4840                                         return err;
4841                         }
4842                 }
4843         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4844                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4845                 /* 5701 {A0,B0} CRC bug workaround */
4846                 tg3_writephy(tp, 0x15, 0x0a75);
4847                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4848                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4849                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4850         }
4851
4852         /* Clear pending interrupts... */
4853         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4854         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4855
4856         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4857                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4858         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4859                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4860
4861         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4862             tg3_asic_rev(tp) == ASIC_REV_5701) {
4863                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4864                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4865                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4866                 else
4867                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4868         }
4869
4870         current_link_up = false;
4871         current_speed = SPEED_UNKNOWN;
4872         current_duplex = DUPLEX_UNKNOWN;
4873         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4874         tp->link_config.rmt_adv = 0;
4875
4876         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4877                 err = tg3_phy_auxctl_read(tp,
4878                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4879                                           &val);
4880                 if (!err && !(val & (1 << 10))) {
4881                         tg3_phy_auxctl_write(tp,
4882                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4883                                              val | (1 << 10));
4884                         goto relink;
4885                 }
4886         }
4887
4888         bmsr = 0;
4889         for (i = 0; i < 100; i++) {
4890                 tg3_readphy(tp, MII_BMSR, &bmsr);
4891                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4892                     (bmsr & BMSR_LSTATUS))
4893                         break;
4894                 udelay(40);
4895         }
4896
4897         if (bmsr & BMSR_LSTATUS) {
4898                 u32 aux_stat, bmcr;
4899
4900                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4901                 for (i = 0; i < 2000; i++) {
4902                         udelay(10);
4903                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4904                             aux_stat)
4905                                 break;
4906                 }
4907
4908                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4909                                              &current_speed,
4910                                              &current_duplex);
4911
4912                 bmcr = 0;
4913                 for (i = 0; i < 200; i++) {
4914                         tg3_readphy(tp, MII_BMCR, &bmcr);
4915                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4916                                 continue;
4917                         if (bmcr && bmcr != 0x7fff)
4918                                 break;
4919                         udelay(10);
4920                 }
4921
4922                 lcl_adv = 0;
4923                 rmt_adv = 0;
4924
4925                 tp->link_config.active_speed = current_speed;
4926                 tp->link_config.active_duplex = current_duplex;
4927
4928                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4929                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4930
4931                         if ((bmcr & BMCR_ANENABLE) &&
4932                             eee_config_ok &&
4933                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4934                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4935                                 current_link_up = true;
4936
4937                         /* Changes to the EEE settings take effect only after
4938                          * a PHY reset.  If we have skipped a reset due to
4939                          * Link Flap Avoidance being enabled, do it now.
4940                          */
4941                         if (!eee_config_ok &&
4942                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4943                             !force_reset) {
4944                                 tg3_setup_eee(tp);
4945                                 tg3_phy_reset(tp);
4946                         }
4947                 } else {
4948                         if (!(bmcr & BMCR_ANENABLE) &&
4949                             tp->link_config.speed == current_speed &&
4950                             tp->link_config.duplex == current_duplex) {
4951                                 current_link_up = true;
4952                         }
4953                 }
4954
4955                 if (current_link_up &&
4956                     tp->link_config.active_duplex == DUPLEX_FULL) {
4957                         u32 reg, bit;
4958
4959                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4960                                 reg = MII_TG3_FET_GEN_STAT;
4961                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4962                         } else {
4963                                 reg = MII_TG3_EXT_STAT;
4964                                 bit = MII_TG3_EXT_STAT_MDIX;
4965                         }
4966
4967                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4968                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4969
4970                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4971                 }
4972         }
4973
4974 relink:
4975         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4976                 tg3_phy_copper_begin(tp);
4977
4978                 if (tg3_flag(tp, ROBOSWITCH)) {
4979                         current_link_up = true;
4980                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4981                         current_speed = SPEED_1000;
4982                         current_duplex = DUPLEX_FULL;
4983                         tp->link_config.active_speed = current_speed;
4984                         tp->link_config.active_duplex = current_duplex;
4985                 }
4986
4987                 tg3_readphy(tp, MII_BMSR, &bmsr);
4988                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4989                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4990                         current_link_up = true;
4991         }
4992
4993         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4994         if (current_link_up) {
4995                 if (tp->link_config.active_speed == SPEED_100 ||
4996                     tp->link_config.active_speed == SPEED_10)
4997                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4998                 else
4999                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5000         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5001                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5002         else
5003                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5004
5005         /* In order for the 5750 core in the BCM4785 chip to work
5006          * properly in RGMII mode, the LED Control Register must be set up.
5007          */
5008         if (tg3_flag(tp, RGMII_MODE)) {
5009                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5010                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5011
5012                 if (tp->link_config.active_speed == SPEED_10)
5013                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5014                 else if (tp->link_config.active_speed == SPEED_100)
5015                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016                                      LED_CTRL_100MBPS_ON);
5017                 else if (tp->link_config.active_speed == SPEED_1000)
5018                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5019                                      LED_CTRL_1000MBPS_ON);
5020
5021                 tw32(MAC_LED_CTRL, led_ctrl);
5022                 udelay(40);
5023         }
5024
5025         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5026         if (tp->link_config.active_duplex == DUPLEX_HALF)
5027                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5028
5029         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5030                 if (current_link_up &&
5031                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5032                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5033                 else
5034                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5035         }
5036
5037         /* ??? Without this setting Netgear GA302T PHY does not
5038          * ??? send/receive packets...
5039          */
5040         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5041             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5042                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5043                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5044                 udelay(80);
5045         }
5046
5047         tw32_f(MAC_MODE, tp->mac_mode);
5048         udelay(40);
5049
5050         tg3_phy_eee_adjust(tp, current_link_up);
5051
5052         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5053                 /* Polled via timer. */
5054                 tw32_f(MAC_EVENT, 0);
5055         } else {
5056                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5057         }
5058         udelay(40);
5059
5060         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5061             current_link_up &&
5062             tp->link_config.active_speed == SPEED_1000 &&
5063             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5064                 udelay(120);
5065                 tw32_f(MAC_STATUS,
5066                      (MAC_STATUS_SYNC_CHANGED |
5067                       MAC_STATUS_CFG_CHANGED));
5068                 udelay(40);
5069                 tg3_write_mem(tp,
5070                               NIC_SRAM_FIRMWARE_MBOX,
5071                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5072         }
5073
5074         /* Prevent send BD corruption: disable CLKREQ at 10/100 speeds. */
5075         if (tg3_flag(tp, CLKREQ_BUG)) {
5076                 if (tp->link_config.active_speed == SPEED_100 ||
5077                     tp->link_config.active_speed == SPEED_10)
5078                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5079                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5080                 else
5081                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5082                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5083         }
5084
5085         tg3_test_and_report_link_chg(tp, current_link_up);
5086
5087         return 0;
5088 }
5089
5090 struct tg3_fiber_aneginfo {
5091         int state;
5092 #define ANEG_STATE_UNKNOWN              0
5093 #define ANEG_STATE_AN_ENABLE            1
5094 #define ANEG_STATE_RESTART_INIT         2
5095 #define ANEG_STATE_RESTART              3
5096 #define ANEG_STATE_DISABLE_LINK_OK      4
5097 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5098 #define ANEG_STATE_ABILITY_DETECT       6
5099 #define ANEG_STATE_ACK_DETECT_INIT      7
5100 #define ANEG_STATE_ACK_DETECT           8
5101 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5102 #define ANEG_STATE_COMPLETE_ACK         10
5103 #define ANEG_STATE_IDLE_DETECT_INIT     11
5104 #define ANEG_STATE_IDLE_DETECT          12
5105 #define ANEG_STATE_LINK_OK              13
5106 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5107 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5108
5109         u32 flags;
5110 #define MR_AN_ENABLE            0x00000001
5111 #define MR_RESTART_AN           0x00000002
5112 #define MR_AN_COMPLETE          0x00000004
5113 #define MR_PAGE_RX              0x00000008
5114 #define MR_NP_LOADED            0x00000010
5115 #define MR_TOGGLE_TX            0x00000020
5116 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5117 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5118 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5119 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5120 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5121 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5122 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5123 #define MR_TOGGLE_RX            0x00002000
5124 #define MR_NP_RX                0x00004000
5125
5126 #define MR_LINK_OK              0x80000000
5127
5128         unsigned long link_time, cur_time;
5129
5130         u32 ability_match_cfg;
5131         int ability_match_count;
5132
5133         char ability_match, idle_match, ack_match;
5134
5135         u32 txconfig, rxconfig;
5136 #define ANEG_CFG_NP             0x00000080
5137 #define ANEG_CFG_ACK            0x00000040
5138 #define ANEG_CFG_RF2            0x00000020
5139 #define ANEG_CFG_RF1            0x00000010
5140 #define ANEG_CFG_PS2            0x00000001
5141 #define ANEG_CFG_PS1            0x00008000
5142 #define ANEG_CFG_HD             0x00004000
5143 #define ANEG_CFG_FD             0x00002000
5144 #define ANEG_CFG_INVAL          0x00001f06
5145
5146 };
5147 #define ANEG_OK         0
5148 #define ANEG_DONE       1
5149 #define ANEG_TIMER_ENAB 2
5150 #define ANEG_FAILED     -1
5151
5152 #define ANEG_STATE_SETTLE_TIME  10000
5153
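/* Software arbitration state machine for 1000BASE-X autonegotiation,
 * essentially the IEEE 802.3 clause 37 state diagram.  Each call is one
 * tick: the received config word is sampled from MAC_RX_AUTO_NEG, the
 * ability/ack/idle match variables are updated, and one state
 * transition is evaluated.  Returns ANEG_OK or ANEG_TIMER_ENAB while
 * negotiation is in progress, ANEG_DONE on completion, or ANEG_FAILED.
 */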
5154 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5155                                    struct tg3_fiber_aneginfo *ap)
5156 {
5157         u16 flowctrl;
5158         unsigned long delta;
5159         u32 rx_cfg_reg;
5160         int ret;
5161
5162         if (ap->state == ANEG_STATE_UNKNOWN) {
5163                 ap->rxconfig = 0;
5164                 ap->link_time = 0;
5165                 ap->cur_time = 0;
5166                 ap->ability_match_cfg = 0;
5167                 ap->ability_match_count = 0;
5168                 ap->ability_match = 0;
5169                 ap->idle_match = 0;
5170                 ap->ack_match = 0;
5171         }
5172         ap->cur_time++;
5173
5174         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5175                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5176
5177                 if (rx_cfg_reg != ap->ability_match_cfg) {
5178                         ap->ability_match_cfg = rx_cfg_reg;
5179                         ap->ability_match = 0;
5180                         ap->ability_match_count = 0;
5181                 } else {
5182                         if (++ap->ability_match_count > 1) {
5183                                 ap->ability_match = 1;
5184                                 ap->ability_match_cfg = rx_cfg_reg;
5185                         }
5186                 }
5187                 if (rx_cfg_reg & ANEG_CFG_ACK)
5188                         ap->ack_match = 1;
5189                 else
5190                         ap->ack_match = 0;
5191
5192                 ap->idle_match = 0;
5193         } else {
5194                 ap->idle_match = 1;
5195                 ap->ability_match_cfg = 0;
5196                 ap->ability_match_count = 0;
5197                 ap->ability_match = 0;
5198                 ap->ack_match = 0;
5199
5200                 rx_cfg_reg = 0;
5201         }
5202
5203         ap->rxconfig = rx_cfg_reg;
5204         ret = ANEG_OK;
5205
5206         switch (ap->state) {
5207         case ANEG_STATE_UNKNOWN:
5208                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5209                         ap->state = ANEG_STATE_AN_ENABLE;
5210
5211                 /* fall through */
5212         case ANEG_STATE_AN_ENABLE:
5213                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5214                 if (ap->flags & MR_AN_ENABLE) {
5215                         ap->link_time = 0;
5216                         ap->cur_time = 0;
5217                         ap->ability_match_cfg = 0;
5218                         ap->ability_match_count = 0;
5219                         ap->ability_match = 0;
5220                         ap->idle_match = 0;
5221                         ap->ack_match = 0;
5222
5223                         ap->state = ANEG_STATE_RESTART_INIT;
5224                 } else {
5225                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5226                 }
5227                 break;
5228
5229         case ANEG_STATE_RESTART_INIT:
5230                 ap->link_time = ap->cur_time;
5231                 ap->flags &= ~(MR_NP_LOADED);
5232                 ap->txconfig = 0;
5233                 tw32(MAC_TX_AUTO_NEG, 0);
5234                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5235                 tw32_f(MAC_MODE, tp->mac_mode);
5236                 udelay(40);
5237
5238                 ret = ANEG_TIMER_ENAB;
5239                 ap->state = ANEG_STATE_RESTART;
5240
5241                 /* fall through */
5242         case ANEG_STATE_RESTART:
5243                 delta = ap->cur_time - ap->link_time;
5244                 if (delta > ANEG_STATE_SETTLE_TIME)
5245                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5246                 else
5247                         ret = ANEG_TIMER_ENAB;
5248                 break;
5249
5250         case ANEG_STATE_DISABLE_LINK_OK:
5251                 ret = ANEG_DONE;
5252                 break;
5253
5254         case ANEG_STATE_ABILITY_DETECT_INIT:
5255                 ap->flags &= ~(MR_TOGGLE_TX);
5256                 ap->txconfig = ANEG_CFG_FD;
5257                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5258                 if (flowctrl & ADVERTISE_1000XPAUSE)
5259                         ap->txconfig |= ANEG_CFG_PS1;
5260                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5261                         ap->txconfig |= ANEG_CFG_PS2;
5262                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5263                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5264                 tw32_f(MAC_MODE, tp->mac_mode);
5265                 udelay(40);
5266
5267                 ap->state = ANEG_STATE_ABILITY_DETECT;
5268                 break;
5269
5270         case ANEG_STATE_ABILITY_DETECT:
5271                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5272                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5273                 break;
5274
5275         case ANEG_STATE_ACK_DETECT_INIT:
5276                 ap->txconfig |= ANEG_CFG_ACK;
5277                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5278                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5279                 tw32_f(MAC_MODE, tp->mac_mode);
5280                 udelay(40);
5281
5282                 ap->state = ANEG_STATE_ACK_DETECT;
5283
5284                 /* fall through */
5285         case ANEG_STATE_ACK_DETECT:
5286                 if (ap->ack_match != 0) {
5287                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5288                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5289                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5290                         } else {
5291                                 ap->state = ANEG_STATE_AN_ENABLE;
5292                         }
5293                 } else if (ap->ability_match != 0 &&
5294                            ap->rxconfig == 0) {
5295                         ap->state = ANEG_STATE_AN_ENABLE;
5296                 }
5297                 break;
5298
5299         case ANEG_STATE_COMPLETE_ACK_INIT:
5300                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5301                         ret = ANEG_FAILED;
5302                         break;
5303                 }
5304                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5305                                MR_LP_ADV_HALF_DUPLEX |
5306                                MR_LP_ADV_SYM_PAUSE |
5307                                MR_LP_ADV_ASYM_PAUSE |
5308                                MR_LP_ADV_REMOTE_FAULT1 |
5309                                MR_LP_ADV_REMOTE_FAULT2 |
5310                                MR_LP_ADV_NEXT_PAGE |
5311                                MR_TOGGLE_RX |
5312                                MR_NP_RX);
5313                 if (ap->rxconfig & ANEG_CFG_FD)
5314                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5315                 if (ap->rxconfig & ANEG_CFG_HD)
5316                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5317                 if (ap->rxconfig & ANEG_CFG_PS1)
5318                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5319                 if (ap->rxconfig & ANEG_CFG_PS2)
5320                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5321                 if (ap->rxconfig & ANEG_CFG_RF1)
5322                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5323                 if (ap->rxconfig & ANEG_CFG_RF2)
5324                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5325                 if (ap->rxconfig & ANEG_CFG_NP)
5326                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5327
5328                 ap->link_time = ap->cur_time;
5329
5330                 ap->flags ^= (MR_TOGGLE_TX);
5331                 if (ap->rxconfig & 0x0008)
5332                         ap->flags |= MR_TOGGLE_RX;
5333                 if (ap->rxconfig & ANEG_CFG_NP)
5334                         ap->flags |= MR_NP_RX;
5335                 ap->flags |= MR_PAGE_RX;
5336
5337                 ap->state = ANEG_STATE_COMPLETE_ACK;
5338                 ret = ANEG_TIMER_ENAB;
5339                 break;
5340
5341         case ANEG_STATE_COMPLETE_ACK:
5342                 if (ap->ability_match != 0 &&
5343                     ap->rxconfig == 0) {
5344                         ap->state = ANEG_STATE_AN_ENABLE;
5345                         break;
5346                 }
5347                 delta = ap->cur_time - ap->link_time;
5348                 if (delta > ANEG_STATE_SETTLE_TIME) {
5349                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5350                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5351                         } else {
5352                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5353                                     !(ap->flags & MR_NP_RX)) {
5354                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5355                                 } else {
5356                                         ret = ANEG_FAILED;
5357                                 }
5358                         }
5359                 }
5360                 break;
5361
5362         case ANEG_STATE_IDLE_DETECT_INIT:
5363                 ap->link_time = ap->cur_time;
5364                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5365                 tw32_f(MAC_MODE, tp->mac_mode);
5366                 udelay(40);
5367
5368                 ap->state = ANEG_STATE_IDLE_DETECT;
5369                 ret = ANEG_TIMER_ENAB;
5370                 break;
5371
5372         case ANEG_STATE_IDLE_DETECT:
5373                 if (ap->ability_match != 0 &&
5374                     ap->rxconfig == 0) {
5375                         ap->state = ANEG_STATE_AN_ENABLE;
5376                         break;
5377                 }
5378                 delta = ap->cur_time - ap->link_time;
5379                 if (delta > ANEG_STATE_SETTLE_TIME) {
5380                         /* XXX another gem from the Broadcom driver :( */
5381                         ap->state = ANEG_STATE_LINK_OK;
5382                 }
5383                 break;
5384
5385         case ANEG_STATE_LINK_OK:
5386                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5387                 ret = ANEG_DONE;
5388                 break;
5389
5390         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5391                 /* ??? unimplemented */
5392                 break;
5393
5394         case ANEG_STATE_NEXT_PAGE_WAIT:
5395                 /* ??? unimplemented */
5396                 break;
5397
5398         default:
5399                 ret = ANEG_FAILED;
5400                 break;
5401         }
5402
5403         return ret;
5404 }
5405
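/* Drive the state machine above to completion, ticking it roughly once
 * per microsecond for up to ~195 ms with config-word transmission
 * (MAC_MODE_SEND_CONFIGS) enabled.  Returns 1 when the machine finishes
 * with its completion/link-OK flags set.
 */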
5406 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5407 {
5408         int res = 0;
5409         struct tg3_fiber_aneginfo aninfo;
5410         int status = ANEG_FAILED;
5411         unsigned int tick;
5412         u32 tmp;
5413
5414         tw32_f(MAC_TX_AUTO_NEG, 0);
5415
5416         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5417         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5418         udelay(40);
5419
5420         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5421         udelay(40);
5422
5423         memset(&aninfo, 0, sizeof(aninfo));
5424         aninfo.flags |= MR_AN_ENABLE;
5425         aninfo.state = ANEG_STATE_UNKNOWN;
5426         aninfo.cur_time = 0;
5427         tick = 0;
5428         while (++tick < 195000) {
5429                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5430                 if (status == ANEG_DONE || status == ANEG_FAILED)
5431                         break;
5432
5433                 udelay(1);
5434         }
5435
5436         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5437         tw32_f(MAC_MODE, tp->mac_mode);
5438         udelay(40);
5439
5440         *txflags = aninfo.txconfig;
5441         *rxflags = aninfo.flags;
5442
5443         if (status == ANEG_DONE &&
5444             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5445                              MR_LP_ADV_FULL_DUPLEX)))
5446                 res = 1;
5447
5448         return res;
5449 }
5450
5451 static void tg3_init_bcm8002(struct tg3 *tp)
5452 {
5453         u32 mac_status = tr32(MAC_STATUS);
5454         int i;
5455
5456         /* Reset when initializing for the first time or when we have a link. */
5457         if (tg3_flag(tp, INIT_COMPLETE) &&
5458             !(mac_status & MAC_STATUS_PCS_SYNCED))
5459                 return;
5460
5461         /* Set PLL lock range. */
5462         tg3_writephy(tp, 0x16, 0x8007);
5463
5464         /* SW reset */
5465         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5466
5467         /* Wait for reset to complete. */
5468         /* XXX schedule_timeout() ... */
5469         for (i = 0; i < 500; i++)
5470                 udelay(10);
5471
5472         /* Config mode; select PMA/Ch 1 regs. */
5473         tg3_writephy(tp, 0x10, 0x8411);
5474
5475         /* Enable auto-lock and comdet, select txclk for tx. */
5476         tg3_writephy(tp, 0x11, 0x0a10);
5477
5478         tg3_writephy(tp, 0x18, 0x00a0);
5479         tg3_writephy(tp, 0x16, 0x41ff);
5480
5481         /* Assert and deassert POR. */
5482         tg3_writephy(tp, 0x13, 0x0400);
5483         udelay(40);
5484         tg3_writephy(tp, 0x13, 0x0000);
5485
5486         tg3_writephy(tp, 0x11, 0x0a50);
5487         udelay(40);
5488         tg3_writephy(tp, 0x11, 0x0a10);
5489
5490         /* Wait for signal to stabilize */
5491         /* XXX schedule_timeout() ... */
5492         for (i = 0; i < 15000; i++)
5493                 udelay(10);
5494
5495         /* Deselect the channel register so we can read the PHYID
5496          * later.
5497          */
5498         tg3_writephy(tp, 0x10, 0x8011);
5499 }
5500
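/* Autonegotiation through the on-chip SG_DIG SERDES block.  Handles the
 * forced-mode case, builds the expected SG_DIG_CTRL value (pause bits
 * derived from the requested flow control), applies a MAC_SERDES_CFG
 * workaround (skipped on 5704 A0/A1) around SG_DIG resets, and falls
 * back to parallel detection when the partner never completes autoneg.
 */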
5501 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5502 {
5503         u16 flowctrl;
5504         bool current_link_up;
5505         u32 sg_dig_ctrl, sg_dig_status;
5506         u32 serdes_cfg, expected_sg_dig_ctrl;
5507         int workaround, port_a;
5508
5509         serdes_cfg = 0;
5510         expected_sg_dig_ctrl = 0;
5511         workaround = 0;
5512         port_a = 1;
5513         current_link_up = false;
5514
5515         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5516             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5517                 workaround = 1;
5518                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5519                         port_a = 0;
5520
5521                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5522                 /* preserve bits 20-23 for voltage regulator */
5523                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5524         }
5525
5526         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5527
5528         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5529                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5530                         if (workaround) {
5531                                 u32 val = serdes_cfg;
5532
5533                                 if (port_a)
5534                                         val |= 0xc010000;
5535                                 else
5536                                         val |= 0x4010000;
5537                                 tw32_f(MAC_SERDES_CFG, val);
5538                         }
5539
5540                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5541                 }
5542                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5543                         tg3_setup_flow_control(tp, 0, 0);
5544                         current_link_up = true;
5545                 }
5546                 goto out;
5547         }
5548
5549         /* Want auto-negotiation.  */
5550         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5551
5552         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5553         if (flowctrl & ADVERTISE_1000XPAUSE)
5554                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5555         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5556                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5557
5558         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5559                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5560                     tp->serdes_counter &&
5561                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5562                                     MAC_STATUS_RCVD_CFG)) ==
5563                      MAC_STATUS_PCS_SYNCED)) {
5564                         tp->serdes_counter--;
5565                         current_link_up = true;
5566                         goto out;
5567                 }
5568 restart_autoneg:
5569                 if (workaround)
5570                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5571                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5572                 udelay(5);
5573                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5574
5575                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5576                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5577         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5578                                  MAC_STATUS_SIGNAL_DET)) {
5579                 sg_dig_status = tr32(SG_DIG_STATUS);
5580                 mac_status = tr32(MAC_STATUS);
5581
5582                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5583                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5584                         u32 local_adv = 0, remote_adv = 0;
5585
5586                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5587                                 local_adv |= ADVERTISE_1000XPAUSE;
5588                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5589                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5590
5591                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5592                                 remote_adv |= LPA_1000XPAUSE;
5593                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5594                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5595
5596                         tp->link_config.rmt_adv =
5597                                            mii_adv_to_ethtool_adv_x(remote_adv);
5598
5599                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5600                         current_link_up = true;
5601                         tp->serdes_counter = 0;
5602                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5603                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5604                         if (tp->serdes_counter)
5605                                 tp->serdes_counter--;
5606                         else {
5607                                 if (workaround) {
5608                                         u32 val = serdes_cfg;
5609
5610                                         if (port_a)
5611                                                 val |= 0xc010000;
5612                                         else
5613                                                 val |= 0x4010000;
5614
5615                                         tw32_f(MAC_SERDES_CFG, val);
5616                                 }
5617
5618                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5619                                 udelay(40);
5620
5621                                 /* Link parallel detection: the link is up
5622                                  * only if we have PCS_SYNC and are not
5623                                  * receiving config code words. */
5624                                 mac_status = tr32(MAC_STATUS);
5625                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5626                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5627                                         tg3_setup_flow_control(tp, 0, 0);
5628                                         current_link_up = true;
5629                                         tp->phy_flags |=
5630                                                 TG3_PHYFLG_PARALLEL_DETECT;
5631                                         tp->serdes_counter =
5632                                                 SERDES_PARALLEL_DET_TIMEOUT;
5633                                 } else
5634                                         goto restart_autoneg;
5635                         }
5636                 }
5637         } else {
5638                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5639                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5640         }
5641
5642 out:
5643         return current_link_up;
5644 }
5645
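/* Fallback fiber bring-up used when the HW_AUTONEG flag is not set:
 * run the software state machine via fiber_autoneg() when autoneg is
 * enabled and decode the resulting pause bits; otherwise simply force
 * a 1000 Mb/s full-duplex link.
 */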
5646 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5647 {
5648         bool current_link_up = false;
5649
5650         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5651                 goto out;
5652
5653         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5654                 u32 txflags, rxflags;
5655                 int i;
5656
5657                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5658                         u32 local_adv = 0, remote_adv = 0;
5659
5660                         if (txflags & ANEG_CFG_PS1)
5661                                 local_adv |= ADVERTISE_1000XPAUSE;
5662                         if (txflags & ANEG_CFG_PS2)
5663                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5664
5665                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5666                                 remote_adv |= LPA_1000XPAUSE;
5667                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5668                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5669
5670                         tp->link_config.rmt_adv =
5671                                            mii_adv_to_ethtool_adv_x(remote_adv);
5672
5673                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5674
5675                         current_link_up = true;
5676                 }
5677                 for (i = 0; i < 30; i++) {
5678                         udelay(20);
5679                         tw32_f(MAC_STATUS,
5680                                (MAC_STATUS_SYNC_CHANGED |
5681                                 MAC_STATUS_CFG_CHANGED));
5682                         udelay(40);
5683                         if ((tr32(MAC_STATUS) &
5684                              (MAC_STATUS_SYNC_CHANGED |
5685                               MAC_STATUS_CFG_CHANGED)) == 0)
5686                                 break;
5687                 }
5688
5689                 mac_status = tr32(MAC_STATUS);
5690                 if (!current_link_up &&
5691                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5692                     !(mac_status & MAC_STATUS_RCVD_CFG))
5693                         current_link_up = true;
5694         } else {
5695                 tg3_setup_flow_control(tp, 0, 0);
5696
5697                 /* Forcing 1000FD link up. */
5698                 current_link_up = true;
5699
5700                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5701                 udelay(40);
5702
5703                 tw32_f(MAC_MODE, tp->mac_mode);
5704                 udelay(40);
5705         }
5706
5707 out:
5708         return current_link_up;
5709 }
5710
5711 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5712 {
5713         u32 orig_pause_cfg;
5714         u16 orig_active_speed;
5715         u8 orig_active_duplex;
5716         u32 mac_status;
5717         bool current_link_up;
5718         int i;
5719
5720         orig_pause_cfg = tp->link_config.active_flowctrl;
5721         orig_active_speed = tp->link_config.active_speed;
5722         orig_active_duplex = tp->link_config.active_duplex;
5723
5724         if (!tg3_flag(tp, HW_AUTONEG) &&
5725             tp->link_up &&
5726             tg3_flag(tp, INIT_COMPLETE)) {
5727                 mac_status = tr32(MAC_STATUS);
5728                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5729                                MAC_STATUS_SIGNAL_DET |
5730                                MAC_STATUS_CFG_CHANGED |
5731                                MAC_STATUS_RCVD_CFG);
5732                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5733                                    MAC_STATUS_SIGNAL_DET)) {
5734                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5735                                             MAC_STATUS_CFG_CHANGED));
5736                         return 0;
5737                 }
5738         }
5739
5740         tw32_f(MAC_TX_AUTO_NEG, 0);
5741
5742         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5743         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5744         tw32_f(MAC_MODE, tp->mac_mode);
5745         udelay(40);
5746
5747         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5748                 tg3_init_bcm8002(tp);
5749
5750         /* Enable link change events even when polling the serdes. */
5751         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5752         udelay(40);
5753
5754         current_link_up = false;
5755         tp->link_config.rmt_adv = 0;
5756         mac_status = tr32(MAC_STATUS);
5757
5758         if (tg3_flag(tp, HW_AUTONEG))
5759                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5760         else
5761                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5762
5763         tp->napi[0].hw_status->status =
5764                 (SD_STATUS_UPDATED |
5765                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5766
5767         for (i = 0; i < 100; i++) {
5768                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5769                                     MAC_STATUS_CFG_CHANGED));
5770                 udelay(5);
5771                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5772                                          MAC_STATUS_CFG_CHANGED |
5773                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5774                         break;
5775         }
5776
5777         mac_status = tr32(MAC_STATUS);
5778         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5779                 current_link_up = false;
5780                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5781                     tp->serdes_counter == 0) {
5782                         tw32_f(MAC_MODE, (tp->mac_mode |
5783                                           MAC_MODE_SEND_CONFIGS));
5784                         udelay(1);
5785                         tw32_f(MAC_MODE, tp->mac_mode);
5786                 }
5787         }
5788
5789         if (current_link_up) {
5790                 tp->link_config.active_speed = SPEED_1000;
5791                 tp->link_config.active_duplex = DUPLEX_FULL;
5792                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5793                                     LED_CTRL_LNKLED_OVERRIDE |
5794                                     LED_CTRL_1000MBPS_ON));
5795         } else {
5796                 tp->link_config.active_speed = SPEED_UNKNOWN;
5797                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5798                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5799                                     LED_CTRL_LNKLED_OVERRIDE |
5800                                     LED_CTRL_TRAFFIC_OVERRIDE));
5801         }
5802
5803         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5804                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5805                 if (orig_pause_cfg != now_pause_cfg ||
5806                     orig_active_speed != tp->link_config.active_speed ||
5807                     orig_active_duplex != tp->link_config.active_duplex)
5808                         tg3_link_report(tp);
5809         }
5810
5811         return 0;
5812 }
5813
5814 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5815 {
5816         int err = 0;
5817         u32 bmsr, bmcr;
5818         u16 current_speed = SPEED_UNKNOWN;
5819         u8 current_duplex = DUPLEX_UNKNOWN;
5820         bool current_link_up = false;
5821         u32 local_adv, remote_adv, sgsr;
5822
5823         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5824              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5825              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5826              (sgsr & SERDES_TG3_SGMII_MODE)) {
5827
5828                 if (force_reset)
5829                         tg3_phy_reset(tp);
5830
5831                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5832
5833                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5834                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835                 } else {
5836                         current_link_up = true;
5837                         if (sgsr & SERDES_TG3_SPEED_1000) {
5838                                 current_speed = SPEED_1000;
5839                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5840                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5841                                 current_speed = SPEED_100;
5842                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5843                         } else {
5844                                 current_speed = SPEED_10;
5845                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5846                         }
5847
5848                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5849                                 current_duplex = DUPLEX_FULL;
5850                         else
5851                                 current_duplex = DUPLEX_HALF;
5852                 }
5853
5854                 tw32_f(MAC_MODE, tp->mac_mode);
5855                 udelay(40);
5856
5857                 tg3_clear_mac_status(tp);
5858
5859                 goto fiber_setup_done;
5860         }
5861
5862         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5863         tw32_f(MAC_MODE, tp->mac_mode);
5864         udelay(40);
5865
5866         tg3_clear_mac_status(tp);
5867
5868         if (force_reset)
5869                 tg3_phy_reset(tp);
5870
5871         tp->link_config.rmt_adv = 0;
5872
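        /* The MII BMSR latches link-down events, so the first read may
         * return stale status; reading twice makes the second value
         * reflect the current link state.
         */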
5873         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5874         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5875         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5876                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5877                         bmsr |= BMSR_LSTATUS;
5878                 else
5879                         bmsr &= ~BMSR_LSTATUS;
5880         }
5881
5882         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5883
5884         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5885             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5886                 /* do nothing, just check for link up at the end */
5887         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5888                 u32 adv, newadv;
5889
5890                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5891                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5892                                  ADVERTISE_1000XPAUSE |
5893                                  ADVERTISE_1000XPSE_ASYM |
5894                                  ADVERTISE_SLCT);
5895
5896                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5897                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5898
5899                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5900                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5901                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5902                         tg3_writephy(tp, MII_BMCR, bmcr);
5903
5904                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5905                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5906                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5907
5908                         return err;
5909                 }
5910         } else {
5911                 u32 new_bmcr;
5912
5913                 bmcr &= ~BMCR_SPEED1000;
5914                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5915
5916                 if (tp->link_config.duplex == DUPLEX_FULL)
5917                         new_bmcr |= BMCR_FULLDPLX;
5918
5919                 if (new_bmcr != bmcr) {
5920                         /* BMCR_SPEED1000 is a reserved bit that needs
5921                          * to be set on write.
5922                          */
5923                         new_bmcr |= BMCR_SPEED1000;
5924
5925                         /* Force a linkdown */
5926                         if (tp->link_up) {
5927                                 u32 adv;
5928
5929                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5930                                 adv &= ~(ADVERTISE_1000XFULL |
5931                                          ADVERTISE_1000XHALF |
5932                                          ADVERTISE_SLCT);
5933                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5934                                 tg3_writephy(tp, MII_BMCR, bmcr |
5935                                                            BMCR_ANRESTART |
5936                                                            BMCR_ANENABLE);
5937                                 udelay(10);
5938                                 tg3_carrier_off(tp);
5939                         }
5940                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5941                         bmcr = new_bmcr;
5942                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5943                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5944                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5945                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5946                                         bmsr |= BMSR_LSTATUS;
5947                                 else
5948                                         bmsr &= ~BMSR_LSTATUS;
5949                         }
5950                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5951                 }
5952         }
5953
5954         if (bmsr & BMSR_LSTATUS) {
5955                 current_speed = SPEED_1000;
5956                 current_link_up = true;
5957                 if (bmcr & BMCR_FULLDPLX)
5958                         current_duplex = DUPLEX_FULL;
5959                 else
5960                         current_duplex = DUPLEX_HALF;
5961
5962                 local_adv = 0;
5963                 remote_adv = 0;
5964
5965                 if (bmcr & BMCR_ANENABLE) {
5966                         u32 common;
5967
5968                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5969                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5970                         common = local_adv & remote_adv;
5971                         if (common & (ADVERTISE_1000XHALF |
5972                                       ADVERTISE_1000XFULL)) {
5973                                 if (common & ADVERTISE_1000XFULL)
5974                                         current_duplex = DUPLEX_FULL;
5975                                 else
5976                                         current_duplex = DUPLEX_HALF;
5977
5978                                 tp->link_config.rmt_adv =
5979                                            mii_adv_to_ethtool_adv_x(remote_adv);
5980                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5981                                 /* Link is up via parallel detect */
5982                         } else {
5983                                 current_link_up = false;
5984                         }
5985                 }
5986         }
5987
5988 fiber_setup_done:
5989         if (current_link_up && current_duplex == DUPLEX_FULL)
5990                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5991
5992         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5993         if (tp->link_config.active_duplex == DUPLEX_HALF)
5994                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5995
5996         tw32_f(MAC_MODE, tp->mac_mode);
5997         udelay(40);
5998
5999         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6000
6001         tp->link_config.active_speed = current_speed;
6002         tp->link_config.active_duplex = current_duplex;
6003
6004         tg3_test_and_report_link_chg(tp, current_link_up);
6005         return err;
6006 }
6007
6008 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6009 {
6010         if (tp->serdes_counter) {
6011                 /* Give autoneg time to complete. */
6012                 tp->serdes_counter--;
6013                 return;
6014         }
6015
6016         if (!tp->link_up &&
6017             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6018                 u32 bmcr;
6019
6020                 tg3_readphy(tp, MII_BMCR, &bmcr);
6021                 if (bmcr & BMCR_ANENABLE) {
6022                         u32 phy1, phy2;
6023
6024                         /* Select shadow register 0x1f */
6025                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6026                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6027
6028                         /* Select expansion interrupt status register */
6029                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6030                                          MII_TG3_DSP_EXP1_INT_STAT);
6031                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6032                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6033
6034                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6035                                 /* We have signal detect and are not receiving
6036                                  * config code words; the link is up via
6037                                  * parallel detection.
6038                                  */
6039
6040                                 bmcr &= ~BMCR_ANENABLE;
6041                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6042                                 tg3_writephy(tp, MII_BMCR, bmcr);
6043                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6044                         }
6045                 }
6046         } else if (tp->link_up &&
6047                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6048                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6049                 u32 phy2;
6050
6051                 /* Select expansion interrupt status register */
6052                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6053                                  MII_TG3_DSP_EXP1_INT_STAT);
6054                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6055                 if (phy2 & 0x20) {
6056                         u32 bmcr;
6057
6058                         /* Config code words received, turn on autoneg. */
6059                         tg3_readphy(tp, MII_BMCR, &bmcr);
6060                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6061
6062                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6063
6064                 }
6065         }
6066 }
6067
6068 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6069 {
6070         u32 val;
6071         int err;
6072
6073         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6074                 err = tg3_setup_fiber_phy(tp, force_reset);
6075         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6076                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6077         else
6078                 err = tg3_setup_copper_phy(tp, force_reset);
6079
6080         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6081                 u32 scale;
6082
6083                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6084                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6085                         scale = 65;
6086                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6087                         scale = 6;
6088                 else
6089                         scale = 12;
6090
6091                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6092                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6093                 tw32(GRC_MISC_CFG, val);
6094         }
6095
6096         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6097               (6 << TX_LENGTHS_IPG_SHIFT);
6098         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6099             tg3_asic_rev(tp) == ASIC_REV_5762)
6100                 val |= tr32(MAC_TX_LENGTHS) &
6101                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6102                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6103
6104         if (tp->link_config.active_speed == SPEED_1000 &&
6105             tp->link_config.active_duplex == DUPLEX_HALF)
6106                 tw32(MAC_TX_LENGTHS, val |
6107                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6108         else
6109                 tw32(MAC_TX_LENGTHS, val |
6110                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6111
6112         if (!tg3_flag(tp, 5705_PLUS)) {
6113                 if (tp->link_up) {
6114                         tw32(HOSTCC_STAT_COAL_TICKS,
6115                              tp->coal.stats_block_coalesce_usecs);
6116                 } else {
6117                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6118                 }
6119         }
6120
6121         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6122                 val = tr32(PCIE_PWR_MGMT_THRESH);
6123                 if (!tp->link_up)
6124                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6125                               tp->pwrmgmt_thresh;
6126                 else
6127                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6128                 tw32(PCIE_PWR_MGMT_THRESH, val);
6129         }
6130
6131         return err;
6132 }
6133
6134 /* tp->lock must be held */
6135 static u64 tg3_refclk_read(struct tg3 *tp)
6136 {
6137         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6138         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6139 }
6140
6141 /* tp->lock must be held */
6142 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6143 {
6144         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6145
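        /* Stop the reference clock while the two 32-bit halves are
         * loaded so the counter cannot tick between the LSB and MSB
         * writes, then resume it.
         */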
6146         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6147         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6148         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6149         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6150 }
6151
6152 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6153 static inline void tg3_full_unlock(struct tg3 *tp);
6154 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6155 {
6156         struct tg3 *tp = netdev_priv(dev);
6157
6158         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6159                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6160                                 SOF_TIMESTAMPING_SOFTWARE;
6161
6162         if (tg3_flag(tp, PTP_CAPABLE)) {
6163                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6164                                         SOF_TIMESTAMPING_RX_HARDWARE |
6165                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6166         }
6167
6168         if (tp->ptp_clock)
6169                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6170         else
6171                 info->phc_index = -1;
6172
6173         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6174
6175         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6176                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6177                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6178                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6179         return 0;
6180 }
6181
6182 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6183 {
6184         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6185         bool neg_adj = false;
6186         u32 correction = 0;
6187
6188         if (ppb < 0) {
6189                 neg_adj = true;
6190                 ppb = -ppb;
6191         }
6192
6193         /* Frequency adjustment is performed using hardware with a 24 bit
6194          * accumulator and a programmable correction value. On each clk, the
6195          * correction value gets added to the accumulator and when it
6196          * overflows, the time counter is incremented/decremented.
6197          *
6198          * So conversion from ppb to correction value is
6199          *              ppb * (1 << 24) / 1000000000
6200          */
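        /* Worked example: ppb = 1000000 (a 1000 ppm adjustment) gives
         * correction = 1000000 * 16777216 / 1000000000 = 16777 (truncated),
         * so the 24-bit accumulator overflows roughly once per 1000 clocks.
         */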
6201         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6202                      TG3_EAV_REF_CLK_CORRECT_MASK;
6203
6204         tg3_full_lock(tp, 0);
6205
6206         if (correction)
6207                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6208                      TG3_EAV_REF_CLK_CORRECT_EN |
6209                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6210         else
6211                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6212
6213         tg3_full_unlock(tp);
6214
6215         return 0;
6216 }
6217
6218 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6219 {
6220         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6221
6222         tg3_full_lock(tp, 0);
6223         tp->ptp_adjust += delta;
6224         tg3_full_unlock(tp);
6225
6226         return 0;
6227 }
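/* Note the split bookkeeping above and below: adjtime only bumps the
 * software offset ptp_adjust, gettime adds that offset to the raw
 * hardware count, and settime rewrites the hardware clock and zeroes
 * the offset again.
 */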
6228
6229 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6230 {
6231         u64 ns;
6232         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6233
6234         tg3_full_lock(tp, 0);
6235         ns = tg3_refclk_read(tp);
6236         ns += tp->ptp_adjust;
6237         tg3_full_unlock(tp);
6238
6239         *ts = ns_to_timespec64(ns);
6240
6241         return 0;
6242 }
6243
6244 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6245                            const struct timespec64 *ts)
6246 {
6247         u64 ns;
6248         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6249
6250         ns = timespec64_to_ns(ts);
6251
6252         tg3_full_lock(tp, 0);
6253         tg3_refclk_write(tp, ns);
6254         tp->ptp_adjust = 0;
6255         tg3_full_unlock(tp);
6256
6257         return 0;
6258 }
6259
6260 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6261                           struct ptp_clock_request *rq, int on)
6262 {
6263         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6264         u32 clock_ctl;
6265         int rval = 0;
6266
6267         switch (rq->type) {
6268         case PTP_CLK_REQ_PEROUT:
6269                 if (rq->perout.index != 0)
6270                         return -EINVAL;
6271
6272                 tg3_full_lock(tp, 0);
6273                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6274                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6275
6276                 if (on) {
6277                         u64 nsec;
6278
6279                         nsec = rq->perout.start.sec * 1000000000ULL +
6280                                rq->perout.start.nsec;
6281
6282                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6283                                 netdev_warn(tp->dev,
6284                                             "Device supports only a one-shot timesync output; period must be 0\n");
6285                                 rval = -EINVAL;
6286                                 goto err_out;
6287                         }
6288
6289                         if (nsec & (1ULL << 63)) {
6290                                 netdev_warn(tp->dev,
6291                                             "Start value (nsec) is out of range; start must fit in 63 bits\n");
6292                                 rval = -EINVAL;
6293                                 goto err_out;
6294                         }
6295
6296                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6297                         tw32(TG3_EAV_WATCHDOG0_MSB,
6298                              TG3_EAV_WATCHDOG0_EN |
6299                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6300
6301                         tw32(TG3_EAV_REF_CLCK_CTL,
6302                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6303                 } else {
6304                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6305                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6306                 }
6307
6308 err_out:
6309                 tg3_full_unlock(tp);
6310                 return rval;
6311
6312         default:
6313                 break;
6314         }
6315
6316         return -EOPNOTSUPP;
6317 }
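/* A minimal userspace sketch (not driver code) of arming the one-shot
 * timesync output handled above, kept under #if 0 so it is never built.
 * It assumes this NIC's PHC is exposed as /dev/ptp0; struct
 * ptp_perout_request and PTP_PEROUT_REQUEST come from
 * <linux/ptp_clock.h>.  The period is left at zero to satisfy the
 * one-shot-only check in tg3_ptp_enable().
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

static int arm_oneshot_pulse(long long sec)
{
	struct ptp_perout_request req = {
		.index      = 0,	/* tg3 exposes a single output */
		.start.sec  = sec,	/* absolute PHC time of the edge */
		.start.nsec = 0,
		/* .period left zeroed: one-shot only */
	};
	int fd, ret;

	fd = open("/dev/ptp0", O_RDWR);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, PTP_PEROUT_REQUEST, &req);
	close(fd);
	return ret;
}
#endif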
6318
6319 static const struct ptp_clock_info tg3_ptp_caps = {
6320         .owner          = THIS_MODULE,
6321         .name           = "tg3 clock",
6322         .max_adj        = 250000000,
6323         .n_alarm        = 0,
6324         .n_ext_ts       = 0,
6325         .n_per_out      = 1,
6326         .n_pins         = 0,
6327         .pps            = 0,
6328         .adjfreq        = tg3_ptp_adjfreq,
6329         .adjtime        = tg3_ptp_adjtime,
6330         .gettime64      = tg3_ptp_gettime,
6331         .settime64      = tg3_ptp_settime,
6332         .enable         = tg3_ptp_enable,
6333 };
6334
6335 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6336                                      struct skb_shared_hwtstamps *timestamp)
6337 {
6338         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6339         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6340                                            tp->ptp_adjust);
6341 }
6342
6343 /* tp->lock must be held */
6344 static void tg3_ptp_init(struct tg3 *tp)
6345 {
6346         if (!tg3_flag(tp, PTP_CAPABLE))
6347                 return;
6348
6349         /* Initialize the hardware clock to the system time. */
6350         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6351         tp->ptp_adjust = 0;
6352         tp->ptp_info = tg3_ptp_caps;
6353 }
6354
6355 /* tp->lock must be held */
6356 static void tg3_ptp_resume(struct tg3 *tp)
6357 {
6358         if (!tg3_flag(tp, PTP_CAPABLE))
6359                 return;
6360
6361         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6362         tp->ptp_adjust = 0;
6363 }
6364
6365 static void tg3_ptp_fini(struct tg3 *tp)
6366 {
6367         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6368                 return;
6369
6370         ptp_clock_unregister(tp->ptp_clock);
6371         tp->ptp_clock = NULL;
6372         tp->ptp_adjust = 0;
6373 }
6374
6375 static inline int tg3_irq_sync(struct tg3 *tp)
6376 {
6377         return tp->irq_sync;
6378 }
6379
6380 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6381 {
6382         int i;
6383
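        /* Bias dst by the register offset so that the word read from
         * MMIO offset (off + i) lands at dst[(off + i) / 4]; the dump
         * buffer stays indexed by register offset across calls.
         */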
6384         dst = (u32 *)((u8 *)dst + off);
6385         for (i = 0; i < len; i += sizeof(u32))
6386                 *dst++ = tr32(off + i);
6387 }
6388
6389 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6390 {
6391         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6392         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6393         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6394         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6395         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6396         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6397         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6398         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6399         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6400         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6401         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6402         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6403         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6404         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6405         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6406         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6407         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6408         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6409         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6410
6411         if (tg3_flag(tp, SUPPORT_MSIX))
6412                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6413
6414         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6415         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6416         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6417         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6418         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6419         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6420         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6421         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6422
6423         if (!tg3_flag(tp, 5705_PLUS)) {
6424                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6425                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6426                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6427         }
6428
6429         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6430         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6431         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6432         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6433         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6434
6435         if (tg3_flag(tp, NVRAM))
6436                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6437 }
6438
6439 static void tg3_dump_state(struct tg3 *tp)
6440 {
6441         int i;
6442         u32 *regs;
6443
6444         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6445         if (!regs)
6446                 return;
6447
6448         if (tg3_flag(tp, PCI_EXPRESS)) {
6449                 /* Read up to but not including private PCI registers */
6450                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6451                         regs[i / sizeof(u32)] = tr32(i);
6452         } else
6453                 tg3_dump_legacy_regs(tp, regs);
6454
6455         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6456                 if (!regs[i + 0] && !regs[i + 1] &&
6457                     !regs[i + 2] && !regs[i + 3])
6458                         continue;
6459
6460                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6461                            i * 4,
6462                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6463         }
6464
6465         kfree(regs);
6466
6467         for (i = 0; i < tp->irq_cnt; i++) {
6468                 struct tg3_napi *tnapi = &tp->napi[i];
6469
6470                 /* SW status block */
6471                 netdev_err(tp->dev,
6472                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6473                            i,
6474                            tnapi->hw_status->status,
6475                            tnapi->hw_status->status_tag,
6476                            tnapi->hw_status->rx_jumbo_consumer,
6477                            tnapi->hw_status->rx_consumer,
6478                            tnapi->hw_status->rx_mini_consumer,
6479                            tnapi->hw_status->idx[0].rx_producer,
6480                            tnapi->hw_status->idx[0].tx_consumer);
6481
6482                 netdev_err(tp->dev,
6483                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6484                            i,
6485                            tnapi->last_tag, tnapi->last_irq_tag,
6486                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6487                            tnapi->rx_rcb_ptr,
6488                            tnapi->prodring.rx_std_prod_idx,
6489                            tnapi->prodring.rx_std_cons_idx,
6490                            tnapi->prodring.rx_jmb_prod_idx,
6491                            tnapi->prodring.rx_jmb_cons_idx);
6492         }
6493 }
6494
6495 /* This is called whenever we suspect that the system chipset is re-
6496  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6497  * is bogus tx completions. We try to recover by setting the
6498  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6499  * in the workqueue.
6500  */
6501 static void tg3_tx_recover(struct tg3 *tp)
6502 {
6503         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6504                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6505
6506         netdev_warn(tp->dev,
6507                     "The system may be re-ordering memory-mapped I/O "
6508                     "cycles to the network device, attempting to recover. "
6509                     "Please report the problem to the driver maintainer "
6510                     "and include system chipset information.\n");
6511
6512         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6513 }
6514
6515 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6516 {
6517         /* Tell compiler to fetch tx indices from memory. */
6518         barrier();
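        /* The masked difference counts descriptors in flight, correct
         * even across wraparound: e.g. with a 512-entry ring, prod = 5
         * and cons = 510 give (5 - 510) & 511 = 7 in flight.
         */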
6519         return tnapi->tx_pending -
6520                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6521 }
6522
6523 /* Tigon3 never reports partial packet sends.  So we do not
6524  * need special logic to handle SKBs that have not had all
6525  * of their frags sent yet, like SunGEM does.
6526  */
6527 static void tg3_tx(struct tg3_napi *tnapi)
6528 {
6529         struct tg3 *tp = tnapi->tp;
6530         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6531         u32 sw_idx = tnapi->tx_cons;
6532         struct netdev_queue *txq;
6533         int index = tnapi - tp->napi;
6534         unsigned int pkts_compl = 0, bytes_compl = 0;
6535
6536         if (tg3_flag(tp, ENABLE_TSS))
6537                 index--;
6538
6539         txq = netdev_get_tx_queue(tp->dev, index);
6540
6541         while (sw_idx != hw_idx) {
6542                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6543                 struct sk_buff *skb = ri->skb;
6544                 int i, tx_bug = 0;
6545
6546                 if (unlikely(skb == NULL)) {
6547                         tg3_tx_recover(tp);
6548                         return;
6549                 }
6550
6551                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6552                         struct skb_shared_hwtstamps timestamp;
6553                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6554                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6555
6556                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6557
6558                         skb_tstamp_tx(skb, &timestamp);
6559                 }
6560
6561                 pci_unmap_single(tp->pdev,
6562                                  dma_unmap_addr(ri, mapping),
6563                                  skb_headlen(skb),
6564                                  PCI_DMA_TODEVICE);
6565
6566                 ri->skb = NULL;
6567
6568                 while (ri->fragmented) {
6569                         ri->fragmented = false;
6570                         sw_idx = NEXT_TX(sw_idx);
6571                         ri = &tnapi->tx_buffers[sw_idx];
6572                 }
6573
6574                 sw_idx = NEXT_TX(sw_idx);
6575
6576                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6577                         ri = &tnapi->tx_buffers[sw_idx];
6578                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6579                                 tx_bug = 1;
6580
6581                         pci_unmap_page(tp->pdev,
6582                                        dma_unmap_addr(ri, mapping),
6583                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6584                                        PCI_DMA_TODEVICE);
6585
6586                         while (ri->fragmented) {
6587                                 ri->fragmented = false;
6588                                 sw_idx = NEXT_TX(sw_idx);
6589                                 ri = &tnapi->tx_buffers[sw_idx];
6590                         }
6591
6592                         sw_idx = NEXT_TX(sw_idx);
6593                 }
6594
6595                 pkts_compl++;
6596                 bytes_compl += skb->len;
6597
6598                 dev_consume_skb_any(skb);
6599
6600                 if (unlikely(tx_bug)) {
6601                         tg3_tx_recover(tp);
6602                         return;
6603                 }
6604         }
6605
6606         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6607
6608         tnapi->tx_cons = sw_idx;
6609
6610         /* Need to make the tx_cons update visible to tg3_start_xmit()
6611          * before checking for netif_queue_stopped().  Without the
6612          * memory barrier, there is a small possibility that tg3_start_xmit()
6613          * will miss it and cause the queue to be stopped forever.
6614          */
6615         smp_mb();
6616
6617         if (unlikely(netif_tx_queue_stopped(txq) &&
6618                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6619                 __netif_tx_lock(txq, smp_processor_id());
6620                 if (netif_tx_queue_stopped(txq) &&
6621                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6622                         netif_tx_wake_queue(txq);
6623                 __netif_tx_unlock(txq);
6624         }
6625 }
6626
6627 static void tg3_frag_free(bool is_frag, void *data)
6628 {
6629         if (is_frag)
6630                 skb_free_frag(data);
6631         else
6632                 kfree(data);
6633 }
6634
6635 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6636 {
6637         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6638                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6639
6640         if (!ri->data)
6641                 return;
6642
6643         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6644                          map_sz, PCI_DMA_FROMDEVICE);
6645         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6646         ri->data = NULL;
6647 }
6648
6649
6650 /* Returns size of skb allocated or < 0 on error.
6651  *
6652  * We only need to fill in the address because the other members
6653  * of the RX descriptor are invariant; see tg3_init_rings.
6654  *
6655  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6656  * posting buffers we only dirty the first cache line of the RX
6657  * descriptor (containing the address).  Whereas for the RX status
6658  * buffers the cpu only reads the last cacheline of the RX descriptor
6659  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6660  */
6661 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6662                              u32 opaque_key, u32 dest_idx_unmasked,
6663                              unsigned int *frag_size)
6664 {
6665         struct tg3_rx_buffer_desc *desc;
6666         struct ring_info *map;
6667         u8 *data;
6668         dma_addr_t mapping;
6669         int skb_size, data_size, dest_idx;
6670
6671         switch (opaque_key) {
6672         case RXD_OPAQUE_RING_STD:
6673                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6674                 desc = &tpr->rx_std[dest_idx];
6675                 map = &tpr->rx_std_buffers[dest_idx];
6676                 data_size = tp->rx_pkt_map_sz;
6677                 break;
6678
6679         case RXD_OPAQUE_RING_JUMBO:
6680                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6681                 desc = &tpr->rx_jmb[dest_idx].std;
6682                 map = &tpr->rx_jmb_buffers[dest_idx];
6683                 data_size = TG3_RX_JMB_MAP_SZ;
6684                 break;
6685
6686         default:
6687                 return -EINVAL;
6688         }
6689
6690         /* Do not overwrite any of the map or rp information
6691          * until we are sure we can commit to a new buffer.
6692          *
6693          * Callers depend upon this behavior and assume that
6694          * we leave everything unchanged if we fail.
6695          */
6696         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6697                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
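        /* Allocations that fit in one page come from the page-frag
         * allocator, letting build_skb() take ownership of the frag;
         * anything larger falls back to kmalloc().
         */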
6698         if (skb_size <= PAGE_SIZE) {
6699                 data = netdev_alloc_frag(skb_size);
6700                 *frag_size = skb_size;
6701         } else {
6702                 data = kmalloc(skb_size, GFP_ATOMIC);
6703                 *frag_size = 0;
6704         }
6705         if (!data)
6706                 return -ENOMEM;
6707
6708         mapping = pci_map_single(tp->pdev,
6709                                  data + TG3_RX_OFFSET(tp),
6710                                  data_size,
6711                                  PCI_DMA_FROMDEVICE);
6712         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6713                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6714                 return -EIO;
6715         }
6716
6717         map->data = data;
6718         dma_unmap_addr_set(map, mapping, mapping);
6719
6720         desc->addr_hi = ((u64)mapping >> 32);
6721         desc->addr_lo = ((u64)mapping & 0xffffffff);
6722
6723         return data_size;
6724 }
6725
6726 /* We only need to copy the address over because the other
6727  * members of the RX descriptor are invariant.  See notes above
6728  * tg3_alloc_rx_data for full details.
6729  */
6730 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6731                            struct tg3_rx_prodring_set *dpr,
6732                            u32 opaque_key, int src_idx,
6733                            u32 dest_idx_unmasked)
6734 {
6735         struct tg3 *tp = tnapi->tp;
6736         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6737         struct ring_info *src_map, *dest_map;
6738         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6739         int dest_idx;
6740
6741         switch (opaque_key) {
6742         case RXD_OPAQUE_RING_STD:
6743                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6744                 dest_desc = &dpr->rx_std[dest_idx];
6745                 dest_map = &dpr->rx_std_buffers[dest_idx];
6746                 src_desc = &spr->rx_std[src_idx];
6747                 src_map = &spr->rx_std_buffers[src_idx];
6748                 break;
6749
6750         case RXD_OPAQUE_RING_JUMBO:
6751                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6752                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6753                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6754                 src_desc = &spr->rx_jmb[src_idx].std;
6755                 src_map = &spr->rx_jmb_buffers[src_idx];
6756                 break;
6757
6758         default:
6759                 return;
6760         }
6761
6762         dest_map->data = src_map->data;
6763         dma_unmap_addr_set(dest_map, mapping,
6764                            dma_unmap_addr(src_map, mapping));
6765         dest_desc->addr_hi = src_desc->addr_hi;
6766         dest_desc->addr_lo = src_desc->addr_lo;
6767
6768         /* Ensure that the update to the skb happens after the physical
6769          * addresses have been transferred to the new BD location.
6770          */
6771         smp_wmb();
6772
6773         src_map->data = NULL;
6774 }
6775
6776 /* The RX ring scheme is composed of multiple rings which post fresh
6777  * buffers to the chip, and one special ring the chip uses to report
6778  * status back to the host.
6779  *
6780  * The special ring reports the status of received packets to the
6781  * host.  The chip does not write into the original descriptor the
6782  * RX buffer was obtained from.  The chip simply takes the original
6783  * descriptor as provided by the host, updates the status and length
6784  * field, then writes this into the next status ring entry.
6785  *
6786  * Each ring the host uses to post buffers to the chip is described
6787  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6788  * it is first placed into the on-chip ram.  When the packet's length
6789  * is known, it walks down the TG3_BDINFO entries to select the ring.
6790  * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
6791  * whose MAXLEN covers the new packet's length is chosen.
6792  *
6793  * The "separate ring for rx status" scheme may sound queer, but it makes
6794  * sense from a cache coherency perspective.  If only the host writes
6795  * to the buffer post rings, and only the chip writes to the rx status
6796  * rings, then cache lines never move beyond shared-modified state.
6797  * If both the host and chip were to write into the same ring, cache line
6798  * eviction could occur since both entities want it in an exclusive state.
6799  */
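/* A rough sketch of the flow described above (illustrative only):
 *
 *   host --- std/jumbo producer rings + mailbox writes ---> chip
 *   chip --- DMA packet data, append return-ring entry ---> host
 *   chip --- rx producer index in the status block     ---> host
 *
 * tg3_rx() below walks the return ring from rx_rcb_ptr up to the
 * producer index the hardware reported in the status block.
 */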
6800 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6801 {
6802         struct tg3 *tp = tnapi->tp;
6803         u32 work_mask, rx_std_posted = 0;
6804         u32 std_prod_idx, jmb_prod_idx;
6805         u32 sw_idx = tnapi->rx_rcb_ptr;
6806         u16 hw_idx;
6807         int received;
6808         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6809
6810         hw_idx = *(tnapi->rx_rcb_prod_idx);
6811         /*
6812          * We need to order the read of hw_idx and the read of
6813          * the opaque cookie.
6814          */
6815         rmb();
6816         work_mask = 0;
6817         received = 0;
6818         std_prod_idx = tpr->rx_std_prod_idx;
6819         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6820         while (sw_idx != hw_idx && budget > 0) {
6821                 struct ring_info *ri;
6822                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6823                 unsigned int len;
6824                 struct sk_buff *skb;
6825                 dma_addr_t dma_addr;
6826                 u32 opaque_key, desc_idx, *post_ptr;
6827                 u8 *data;
6828                 u64 tstamp = 0;
6829
6830                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6831                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6832                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6833                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6834                         dma_addr = dma_unmap_addr(ri, mapping);
6835                         data = ri->data;
6836                         post_ptr = &std_prod_idx;
6837                         rx_std_posted++;
6838                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6839                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6840                         dma_addr = dma_unmap_addr(ri, mapping);
6841                         data = ri->data;
6842                         post_ptr = &jmb_prod_idx;
6843                 } else
6844                         goto next_pkt_nopost;
6845
6846                 work_mask |= opaque_key;
6847
6848                 if (desc->err_vlan & RXD_ERR_MASK) {
6849                 drop_it:
6850                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6851                                        desc_idx, *post_ptr);
6852                 drop_it_no_recycle:
6853                         /* Other statistics are tracked by the card itself. */
6854                         tp->rx_dropped++;
6855                         goto next_pkt;
6856                 }
6857
6858                 prefetch(data + TG3_RX_OFFSET(tp));
6859                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6860                       ETH_FCS_LEN;
6861
6862                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6863                      RXD_FLAG_PTPSTAT_PTPV1 ||
6864                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6865                      RXD_FLAG_PTPSTAT_PTPV2) {
6866                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6867                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6868                 }
6869
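                /* Copybreak: a large packet hands its DMA buffer to the
                 * stack via build_skb() and a fresh buffer is posted; a
                 * small packet is copied into a new skb so the original
                 * buffer can be recycled without reallocation.
                 */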
6870                 if (len > TG3_RX_COPY_THRESH(tp)) {
6871                         int skb_size;
6872                         unsigned int frag_size;
6873
6874                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6875                                                     *post_ptr, &frag_size);
6876                         if (skb_size < 0)
6877                                 goto drop_it;
6878
6879                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6880                                          PCI_DMA_FROMDEVICE);
6881
6882                         /* Ensure that the update to the data happens
6883                          * after the usage of the old DMA mapping.
6884                          */
6885                         smp_wmb();
6886
6887                         ri->data = NULL;
6888
6889                         skb = build_skb(data, frag_size);
6890                         if (!skb) {
6891                                 tg3_frag_free(frag_size != 0, data);
6892                                 goto drop_it_no_recycle;
6893                         }
6894                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6895                 } else {
6896                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6897                                        desc_idx, *post_ptr);
6898
6899                         skb = netdev_alloc_skb(tp->dev,
6900                                                len + TG3_RAW_IP_ALIGN);
6901                         if (skb == NULL)
6902                                 goto drop_it_no_recycle;
6903
6904                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6905                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6906                         memcpy(skb->data,
6907                                data + TG3_RX_OFFSET(tp),
6908                                len);
6909                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6910                 }
6911
6912                 skb_put(skb, len);
6913                 if (tstamp)
6914                         tg3_hwclock_to_timestamp(tp, tstamp,
6915                                                  skb_hwtstamps(skb));
6916
6917                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6918                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6919                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6920                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6921                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6922                 else
6923                         skb_checksum_none_assert(skb);
6924
6925                 skb->protocol = eth_type_trans(skb, tp->dev);
6926
6927                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6928                     skb->protocol != htons(ETH_P_8021Q) &&
6929                     skb->protocol != htons(ETH_P_8021AD)) {
6930                         dev_kfree_skb_any(skb);
6931                         goto drop_it_no_recycle;
6932                 }
6933
6934                 if (desc->type_flags & RXD_FLAG_VLAN &&
6935                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6936                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6937                                                desc->err_vlan & RXD_VLAN_MASK);
6938
6939                 napi_gro_receive(&tnapi->napi, skb);
6940
6941                 received++;
6942                 budget--;
6943
6944 next_pkt:
6945                 (*post_ptr)++;
6946
6947                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6948                         tpr->rx_std_prod_idx = std_prod_idx &
6949                                                tp->rx_std_ring_mask;
6950                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6951                                      tpr->rx_std_prod_idx);
6952                         work_mask &= ~RXD_OPAQUE_RING_STD;
6953                         rx_std_posted = 0;
6954                 }
6955 next_pkt_nopost:
6956                 sw_idx++;
6957                 sw_idx &= tp->rx_ret_ring_mask;
6958
6959                 /* Refresh hw_idx to see if there is new work */
6960                 if (sw_idx == hw_idx) {
6961                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6962                         rmb();
6963                 }
6964         }
6965
6966         /* ACK the status ring. */
6967         tnapi->rx_rcb_ptr = sw_idx;
6968         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6969
6970         /* Refill RX ring(s). */
6971         if (!tg3_flag(tp, ENABLE_RSS)) {
6972                 /* Sync BD data before updating mailbox */
6973                 wmb();
6974
6975                 if (work_mask & RXD_OPAQUE_RING_STD) {
6976                         tpr->rx_std_prod_idx = std_prod_idx &
6977                                                tp->rx_std_ring_mask;
6978                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6979                                      tpr->rx_std_prod_idx);
6980                 }
6981                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6982                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6983                                                tp->rx_jmb_ring_mask;
6984                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6985                                      tpr->rx_jmb_prod_idx);
6986                 }
6987                 mmiowb();
6988         } else if (work_mask) {
6989                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6990                  * updated before the producer indices can be updated.
6991                  */
6992                 smp_wmb();
6993
6994                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6995                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6996
6997                 if (tnapi != &tp->napi[1]) {
6998                         tp->rx_refill = true;
6999                         napi_schedule(&tp->napi[1].napi);
7000                 }
7001         }
7002
7003         return received;
7004 }
7005
7006 static void tg3_poll_link(struct tg3 *tp)
7007 {
7008         /* handle link change and other phy events */
7009         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7010                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7011
7012                 if (sblk->status & SD_STATUS_LINK_CHG) {
7013                         sblk->status = SD_STATUS_UPDATED |
7014                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7015                         spin_lock(&tp->lock);
7016                         if (tg3_flag(tp, USE_PHYLIB)) {
7017                                 tw32_f(MAC_STATUS,
7018                                      (MAC_STATUS_SYNC_CHANGED |
7019                                       MAC_STATUS_CFG_CHANGED |
7020                                       MAC_STATUS_MI_COMPLETION |
7021                                       MAC_STATUS_LNKSTATE_CHANGED));
7022                                 udelay(40);
7023                         } else
7024                                 tg3_setup_phy(tp, false);
7025                         spin_unlock(&tp->lock);
7026                 }
7027         }
7028 }
7029
7030 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7031                                 struct tg3_rx_prodring_set *dpr,
7032                                 struct tg3_rx_prodring_set *spr)
7033 {
7034         u32 si, di, cpycnt, src_prod_idx;
7035         int i, err = 0;
7036
7037         while (1) {
7038                 src_prod_idx = spr->rx_std_prod_idx;
7039
7040                 /* Make sure updates to the rx_std_buffers[] entries and the
7041                  * standard producer index are seen in the correct order.
7042                  */
7043                 smp_rmb();
7044
7045                 if (spr->rx_std_cons_idx == src_prod_idx)
7046                         break;
7047
7048                 if (spr->rx_std_cons_idx < src_prod_idx)
7049                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7050                 else
7051                         cpycnt = tp->rx_std_ring_mask + 1 -
7052                                  spr->rx_std_cons_idx;
7053
7054                 cpycnt = min(cpycnt,
7055                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
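                /* Both clamps keep the copy contiguous.  E.g. with a
                 * 512-entry ring (mask 511): cons = 500 and src prod = 10
                 * give cpycnt = 12 (run to the end of the source ring);
                 * a destination producer at 505 clamps that further to 7.
                 */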
7056
7057                 si = spr->rx_std_cons_idx;
7058                 di = dpr->rx_std_prod_idx;
7059
7060                 for (i = di; i < di + cpycnt; i++) {
7061                         if (dpr->rx_std_buffers[i].data) {
7062                                 cpycnt = i - di;
7063                                 err = -ENOSPC;
7064                                 break;
7065                         }
7066                 }
7067
7068                 if (!cpycnt)
7069                         break;
7070
7071                 /* Ensure that updates to the rx_std_buffers ring and the
7072                  * shadowed hardware producer ring from tg3_recycle_skb() are
7073                  * ordered correctly WRT the skb check above.
7074                  */
7075                 smp_rmb();
7076
7077                 memcpy(&dpr->rx_std_buffers[di],
7078                        &spr->rx_std_buffers[si],
7079                        cpycnt * sizeof(struct ring_info));
7080
7081                 for (i = 0; i < cpycnt; i++, di++, si++) {
7082                         struct tg3_rx_buffer_desc *sbd, *dbd;
7083                         sbd = &spr->rx_std[si];
7084                         dbd = &dpr->rx_std[di];
7085                         dbd->addr_hi = sbd->addr_hi;
7086                         dbd->addr_lo = sbd->addr_lo;
7087                 }
7088
7089                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7090                                        tp->rx_std_ring_mask;
7091                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7092                                        tp->rx_std_ring_mask;
7093         }
7094
7095         while (1) {
7096                 src_prod_idx = spr->rx_jmb_prod_idx;
7097
7098                 /* Make sure updates to the rx_jmb_buffers[] entries and
7099                  * the jumbo producer index are seen in the correct order.
7100                  */
7101                 smp_rmb();
7102
7103                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7104                         break;
7105
7106                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7107                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7108                 else
7109                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7110                                  spr->rx_jmb_cons_idx;
7111
7112                 cpycnt = min(cpycnt,
7113                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7114
7115                 si = spr->rx_jmb_cons_idx;
7116                 di = dpr->rx_jmb_prod_idx;
7117
7118                 for (i = di; i < di + cpycnt; i++) {
7119                         if (dpr->rx_jmb_buffers[i].data) {
7120                                 cpycnt = i - di;
7121                                 err = -ENOSPC;
7122                                 break;
7123                         }
7124                 }
7125
7126                 if (!cpycnt)
7127                         break;
7128
7129                 /* Ensure that updates to the rx_jmb_buffers ring and the
7130                  * shadowed hardware producer ring from tg3_recycle_skb() are
7131                  * ordered correctly WRT the skb check above.
7132                  */
7133                 smp_rmb();
7134
7135                 memcpy(&dpr->rx_jmb_buffers[di],
7136                        &spr->rx_jmb_buffers[si],
7137                        cpycnt * sizeof(struct ring_info));
7138
7139                 for (i = 0; i < cpycnt; i++, di++, si++) {
7140                         struct tg3_rx_buffer_desc *sbd, *dbd;
7141                         sbd = &spr->rx_jmb[si].std;
7142                         dbd = &dpr->rx_jmb[di].std;
7143                         dbd->addr_hi = sbd->addr_hi;
7144                         dbd->addr_lo = sbd->addr_lo;
7145                 }
7146
7147                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7148                                        tp->rx_jmb_ring_mask;
7149                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7150                                        tp->rx_jmb_ring_mask;
7151         }
7152
7153         return err;
7154 }
7155
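/* Core NAPI worker: reap tx completions first, then receive within
 * the remaining budget.  With RSS enabled, vector 1 also refills the
 * hardware-visible producer ring in napi[0] from the per-vector
 * rings and posts the updated producer indices to the chip.
 */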
7156 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7157 {
7158         struct tg3 *tp = tnapi->tp;
7159
7160         /* run TX completion thread */
7161         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7162                 tg3_tx(tnapi);
7163                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7164                         return work_done;
7165         }
7166
7167         if (!tnapi->rx_rcb_prod_idx)
7168                 return work_done;
7169
7170         /* run RX thread, within the bounds set by NAPI.
7171          * All RX "locking" is done by ensuring outside
7172          * code synchronizes with tg3->napi.poll()
7173          */
7174         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7175                 work_done += tg3_rx(tnapi, budget - work_done);
7176
7177         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7178                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7179                 int i, err = 0;
7180                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7181                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7182
7183                 tp->rx_refill = false;
7184                 for (i = 1; i <= tp->rxq_cnt; i++)
7185                         err |= tg3_rx_prodring_xfer(tp, dpr,
7186                                                     &tp->napi[i].prodring);
7187
7188                 wmb();
7189
7190                 if (std_prod_idx != dpr->rx_std_prod_idx)
7191                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7192                                      dpr->rx_std_prod_idx);
7193
7194                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7195                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7196                                      dpr->rx_jmb_prod_idx);
7197
7198                 mmiowb();
7199
7200                 if (err)
7201                         tw32_f(HOSTCC_MODE, tp->coal_now);
7202         }
7203
7204         return work_done;
7205 }
7206
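/* Schedule the reset task at most once; the atomic test-and-set on
 * RESET_TASK_PENDING keeps concurrent callers from queueing it twice.
 */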
7207 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7208 {
7209         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7210                 schedule_work(&tp->reset_task);
7211 }
7212
7213 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7214 {
7215         cancel_work_sync(&tp->reset_task);
7216         tg3_flag_clear(tp, RESET_TASK_PENDING);
7217         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7218 }
7219
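/* NAPI poll handler for MSI-X vectors.  These always run in tagged
 * status mode: the status tag snapshotted into last_tag is written
 * back through the interrupt mailbox on completion so the chip knows
 * how far the driver has processed the status block.
 */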
7220 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7221 {
7222         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7223         struct tg3 *tp = tnapi->tp;
7224         int work_done = 0;
7225         struct tg3_hw_status *sblk = tnapi->hw_status;
7226
7227         while (1) {
7228                 work_done = tg3_poll_work(tnapi, work_done, budget);
7229
7230                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7231                         goto tx_recovery;
7232
7233                 if (unlikely(work_done >= budget))
7234                         break;
7235
7236                 /* tnapi->last_tag is written to the interrupt mailbox
7237                  * below to tell the hw how much work has been processed,
7238                  * so we must read it before checking for more work.
7239                  */
7240                 tnapi->last_tag = sblk->status_tag;
7241                 tnapi->last_irq_tag = tnapi->last_tag;
7242                 rmb();
7243
7244                 /* check for RX/TX work to do */
7245                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7246                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7247
7248                         /* This test is not race free, but looping again
7249                          * here reduces the number of interrupts we take.
7250                          */
7251                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7252                                 continue;
7253
7254                         napi_complete_done(napi, work_done);
7255                         /* Reenable interrupts. */
7256                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7257
7258                         /* This test, by contrast, is synchronized by
7259                          * napi_schedule() and napi_complete(), closing the race.
7260                          */
7261                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7262                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7263                                                   HOSTCC_MODE_ENABLE |
7264                                                   tnapi->coal_now);
7265                         }
7266                         mmiowb();
7267                         break;
7268                 }
7269         }
7270
7271         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7272         return work_done;
7273
7274 tx_recovery:
7275         /* work_done is guaranteed to be less than budget. */
7276         napi_complete(napi);
7277         tg3_reset_task_schedule(tp);
7278         return work_done;
7279 }
7280
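/* Inspect the chip's attention registers and schedule a full reset
 * if a real error is latched (the benign MBUF low-watermark and MSI
 * request bits are masked out).  ERROR_PROCESSED debounces repeated
 * calls while the reset is pending.
 */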
7281 static void tg3_process_error(struct tg3 *tp)
7282 {
7283         u32 val;
7284         bool real_error = false;
7285
7286         if (tg3_flag(tp, ERROR_PROCESSED))
7287                 return;
7288
7289         /* Check Flow Attention register */
7290         val = tr32(HOSTCC_FLOW_ATTN);
7291         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7292                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7293                 real_error = true;
7294         }
7295
7296         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7297                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7298                 real_error = true;
7299         }
7300
7301         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7302                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7303                 real_error = true;
7304         }
7305
7306         if (!real_error)
7307                 return;
7308
7309         tg3_dump_state(tp);
7310
7311         tg3_flag_set(tp, ERROR_PROCESSED);
7312         tg3_reset_task_schedule(tp);
7313 }
7314
7315 static int tg3_poll(struct napi_struct *napi, int budget)
7316 {
7317         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7318         struct tg3 *tp = tnapi->tp;
7319         int work_done = 0;
7320         struct tg3_hw_status *sblk = tnapi->hw_status;
7321
7322         while (1) {
7323                 if (sblk->status & SD_STATUS_ERROR)
7324                         tg3_process_error(tp);
7325
7326                 tg3_poll_link(tp);
7327
7328                 work_done = tg3_poll_work(tnapi, work_done, budget);
7329
7330                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7331                         goto tx_recovery;
7332
7333                 if (unlikely(work_done >= budget))
7334                         break;
7335
7336                 if (tg3_flag(tp, TAGGED_STATUS)) {
7337                         /* tnapi->last_tag is used in tg3_int_reenable()
7338                          * below to tell the hw how much work has been
7339                          * processed, so read it before checking for more work.
7340                          */
7341                         tnapi->last_tag = sblk->status_tag;
7342                         tnapi->last_irq_tag = tnapi->last_tag;
7343                         rmb();
7344                 } else
7345                         sblk->status &= ~SD_STATUS_UPDATED;
7346
7347                 if (likely(!tg3_has_work(tnapi))) {
7348                         napi_complete_done(napi, work_done);
7349                         tg3_int_reenable(tnapi);
7350                         break;
7351                 }
7352         }
7353
7354         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7355         return work_done;
7356
7357 tx_recovery:
7358         /* work_done is guaranteed to be less than budget. */
7359         napi_complete(napi);
7360         tg3_reset_task_schedule(tp);
7361         return work_done;
7362 }
7363
7364 static void tg3_napi_disable(struct tg3 *tp)
7365 {
7366         int i;
7367
7368         for (i = tp->irq_cnt - 1; i >= 0; i--)
7369                 napi_disable(&tp->napi[i].napi);
7370 }
7371
7372 static void tg3_napi_enable(struct tg3 *tp)
7373 {
7374         int i;
7375
7376         for (i = 0; i < tp->irq_cnt; i++)
7377                 napi_enable(&tp->napi[i].napi);
7378 }
7379
7380 static void tg3_napi_init(struct tg3 *tp)
7381 {
7382         int i;
7383
7384         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7385         for (i = 1; i < tp->irq_cnt; i++)
7386                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7387 }
7388
7389 static void tg3_napi_fini(struct tg3 *tp)
7390 {
7391         int i;
7392
7393         for (i = 0; i < tp->irq_cnt; i++)
7394                 netif_napi_del(&tp->napi[i].napi);
7395 }
7396
7397 static inline void tg3_netif_stop(struct tg3 *tp)
7398 {
7399         netif_trans_update(tp->dev);    /* prevent tx timeout */
7400         tg3_napi_disable(tp);
7401         netif_carrier_off(tp->dev);
7402         netif_tx_disable(tp->dev);
7403 }
7404
7405 /* tp->lock must be held */
7406 static inline void tg3_netif_start(struct tg3 *tp)
7407 {
7408         tg3_ptp_resume(tp);
7409
7410         /* NOTE: unconditional netif_tx_wake_all_queues is only
7411          * appropriate so long as all callers are assured to
7412          * have free tx slots (such as after tg3_init_hw)
7413          */
7414         netif_tx_wake_all_queues(tp->dev);
7415
7416         if (tp->link_up)
7417                 netif_carrier_on(tp->dev);
7418
7419         tg3_napi_enable(tp);
7420         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7421         tg3_enable_ints(tp);
7422 }
7423
7424 static void tg3_irq_quiesce(struct tg3 *tp)
7425         __releases(tp->lock)
7426         __acquires(tp->lock)
7427 {
7428         int i;
7429
7430         BUG_ON(tp->irq_sync);
7431
7432         tp->irq_sync = 1;
7433         smp_mb();
7434
7435         spin_unlock_bh(&tp->lock);
7436
7437         for (i = 0; i < tp->irq_cnt; i++)
7438                 synchronize_irq(tp->napi[i].irq_vec);
7439
7440         spin_lock_bh(&tp->lock);
7441 }
7442
7443 /* Fully shut down all tg3 driver activity elsewhere in the system.
7444  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
7445  * Most of the time this is unnecessary, except when shutting down
7446  * the device.
7447  */
7448 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7449 {
7450         spin_lock_bh(&tp->lock);
7451         if (irq_sync)
7452                 tg3_irq_quiesce(tp);
7453 }
7454
7455 static inline void tg3_full_unlock(struct tg3 *tp)
7456 {
7457         spin_unlock_bh(&tp->lock);
7458 }
7459
7460 /* One-shot MSI handler - the chip automatically disables the
7461  * interrupt after sending the MSI, so the driver doesn't have to.
7462  */
7463 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7464 {
7465         struct tg3_napi *tnapi = dev_id;
7466         struct tg3 *tp = tnapi->tp;
7467
7468         prefetch(tnapi->hw_status);
7469         if (tnapi->rx_rcb)
7470                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7471
7472         if (likely(!tg3_irq_sync(tp)))
7473                 napi_schedule(&tnapi->napi);
7474
7475         return IRQ_HANDLED;
7476 }
7477
7478 /* MSI ISR - No need to check for interrupt sharing and no need to
7479  * flush status block and interrupt mailbox. PCI ordering rules
7480  * guarantee that MSI will arrive after the status block.
7481  */
7482 static irqreturn_t tg3_msi(int irq, void *dev_id)
7483 {
7484         struct tg3_napi *tnapi = dev_id;
7485         struct tg3 *tp = tnapi->tp;
7486
7487         prefetch(tnapi->hw_status);
7488         if (tnapi->rx_rcb)
7489                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7490         /*
7491          * Writing any value to intr-mbox-0 clears PCI INTA# and
7492          * chip-internal interrupt pending events.
7493          * Writing non-zero to intr-mbox-0 additionally tells the
7494          * NIC to stop sending us irqs, engaging "in-intr-handler"
7495          * event coalescing.
7496          */
7497         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7498         if (likely(!tg3_irq_sync(tp)))
7499                 napi_schedule(&tnapi->napi);
7500
7501         return IRQ_RETVAL(1);
7502 }
7503
7504 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7505 {
7506         struct tg3_napi *tnapi = dev_id;
7507         struct tg3 *tp = tnapi->tp;
7508         struct tg3_hw_status *sblk = tnapi->hw_status;
7509         unsigned int handled = 1;
7510
7511         /* In INTx mode, it is possible for the interrupt to arrive at
7512          * the CPU before the status block posted ahead of it has landed.
7513          * Reading the PCI State register will confirm whether the
7514          * interrupt is ours and will flush the status block.
7515          */
7516         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7517                 if (tg3_flag(tp, CHIP_RESETTING) ||
7518                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7519                         handled = 0;
7520                         goto out;
7521                 }
7522         }
7523
7524         /*
7525          * Writing any value to intr-mbox-0 clears PCI INTA# and
7526          * chip-internal interrupt pending events.
7527          * Writing non-zero to intr-mbox-0 additionally tells the
7528          * NIC to stop sending us irqs, engaging "in-intr-handler"
7529          * event coalescing.
7530          *
7531          * Flush the mailbox to de-assert the IRQ immediately to prevent
7532          * spurious interrupts.  The flush impacts performance but
7533          * excessive spurious interrupts can be worse in some cases.
7534          */
7535         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7536         if (tg3_irq_sync(tp))
7537                 goto out;
7538         sblk->status &= ~SD_STATUS_UPDATED;
7539         if (likely(tg3_has_work(tnapi))) {
7540                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7541                 napi_schedule(&tnapi->napi);
7542         } else {
7543                 /* No work, shared interrupt perhaps?  Re-enable
7544                  * interrupts, and flush that PCI write.
7545                  */
7546                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7547                                0x00000000);
7548         }
7549 out:
7550         return IRQ_RETVAL(handled);
7551 }
7552
7553 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7554 {
7555         struct tg3_napi *tnapi = dev_id;
7556         struct tg3 *tp = tnapi->tp;
7557         struct tg3_hw_status *sblk = tnapi->hw_status;
7558         unsigned int handled = 1;
7559
7560         /* In INTx mode, it is possible for the interrupt to arrive at
7561          * the CPU before the status block posted ahead of it has landed.
7562          * Reading the PCI State register will confirm whether the
7563          * interrupt is ours and will flush the status block.
7564          */
7565         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7566                 if (tg3_flag(tp, CHIP_RESETTING) ||
7567                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7568                         handled = 0;
7569                         goto out;
7570                 }
7571         }
7572
7573         /*
7574          * Writing any value to intr-mbox-0 clears PCI INTA# and
7575          * chip-internal interrupt pending events.
7576          * Writing non-zero to intr-mbox-0 additionally tells the
7577          * NIC to stop sending us irqs, engaging "in-intr-handler"
7578          * event coalescing.
7579          *
7580          * Flush the mailbox to de-assert the IRQ immediately to prevent
7581          * spurious interrupts.  The flush impacts performance but
7582          * excessive spurious interrupts can be worse in some cases.
7583          */
7584         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7585
7586         /*
7587          * In a shared interrupt configuration, sometimes other devices'
7588          * interrupts will scream.  We record the current status tag here
7589          * so that the above check can report that the screaming interrupts
7590          * are unhandled.  Eventually they will be silenced.
7591          */
7592         tnapi->last_irq_tag = sblk->status_tag;
7593
7594         if (tg3_irq_sync(tp))
7595                 goto out;
7596
7597         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7598
7599         napi_schedule(&tnapi->napi);
7600
7601 out:
7602         return IRQ_RETVAL(handled);
7603 }
7604
7605 /* ISR for interrupt test */
7606 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7607 {
7608         struct tg3_napi *tnapi = dev_id;
7609         struct tg3 *tp = tnapi->tp;
7610         struct tg3_hw_status *sblk = tnapi->hw_status;
7611
7612         if ((sblk->status & SD_STATUS_UPDATED) ||
7613             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7614                 tg3_disable_ints(tp);
7615                 return IRQ_RETVAL(1);
7616         }
7617         return IRQ_RETVAL(0);
7618 }
7619
7620 #ifdef CONFIG_NET_POLL_CONTROLLER
7621 static void tg3_poll_controller(struct net_device *dev)
7622 {
7623         int i;
7624         struct tg3 *tp = netdev_priv(dev);
7625
7626         if (tg3_irq_sync(tp))
7627                 return;
7628
7629         for (i = 0; i < tp->irq_cnt; i++)
7630                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7631 }
7632 #endif
7633
7634 static void tg3_tx_timeout(struct net_device *dev)
7635 {
7636         struct tg3 *tp = netdev_priv(dev);
7637
7638         if (netif_msg_tx_err(tp)) {
7639                 netdev_err(dev, "transmit timed out, resetting\n");
7640                 tg3_dump_state(tp);
7641         }
7642
7643         tg3_reset_task_schedule(tp);
7644 }
7645
7646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
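/* For example, base = 0xfffffff8 and len = 4 gives base + len + 8 ==
 * 0x4 after the u32 wrap, which is < base, so the test fires.  The
 * extra 8 bytes of slack also rejects buffers that end within 8
 * bytes of a boundary.
 */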
7647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7648 {
7649         u32 base = (u32) mapping & 0xffffffff;
7650
7651         return base + len + 8 < base;
7652 }
7653
7654 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7655  * of any 4GB boundaries: 4G, 8G, etc.
7656  */
7657 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7658                                            u32 len, u32 mss)
7659 {
7660         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7661                 u32 base = (u32) mapping & 0xffffffff;
7662
7663                 return ((base + len + (mss & 0x3fff)) < base);
7664         }
7665         return 0;
7666 }
7667
7668 /* Test for DMA addresses > 40-bit */
7669 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7670                                           int len)
7671 {
7672 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7673         if (tg3_flag(tp, 40BIT_DMA_BUG))
7674                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7675         return 0;
7676 #else
7677         return 0;
7678 #endif
7679 }
7680
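/* Fill in one tx buffer descriptor; the 64-bit DMA address is split
 * across addr_hi/addr_lo, and mss/vlan share a single BD field.
 */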
7681 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7682                                  dma_addr_t mapping, u32 len, u32 flags,
7683                                  u32 mss, u32 vlan)
7684 {
7685         txbd->addr_hi = ((u64) mapping >> 32);
7686         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7687         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7688         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7689 }
7690
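/* Emit one or more tx BDs for a mapped buffer, splitting it at
 * tp->dma_limit where the hardware requires it.  Returns true when
 * the mapping would trip a known DMA erratum (short-DMA, 4GB/40-bit
 * boundary, or TSO boundary bugs), in which case the caller falls
 * back to the copy workaround, or to GSO for TSO packets.
 */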
7691 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7692                             dma_addr_t map, u32 len, u32 flags,
7693                             u32 mss, u32 vlan)
7694 {
7695         struct tg3 *tp = tnapi->tp;
7696         bool hwbug = false;
7697
7698         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7699                 hwbug = true;
7700
7701         if (tg3_4g_overflow_test(map, len))
7702                 hwbug = true;
7703
7704         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7705                 hwbug = true;
7706
7707         if (tg3_40bit_overflow_test(tp, map, len))
7708                 hwbug = true;
7709
7710         if (tp->dma_limit) {
7711                 u32 prvidx = *entry;
7712                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7713                 while (len > tp->dma_limit && *budget) {
7714                         u32 frag_len = tp->dma_limit;
7715                         len -= tp->dma_limit;
7716
7717                         /* Avoid the 8-byte DMA problem */
7718                         if (len <= 8) {
7719                                 len += tp->dma_limit / 2;
7720                                 frag_len = tp->dma_limit / 2;
7721                         }
7722
7723                         tnapi->tx_buffers[*entry].fragmented = true;
7724
7725                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7726                                       frag_len, tmp_flag, mss, vlan);
7727                         *budget -= 1;
7728                         prvidx = *entry;
7729                         *entry = NEXT_TX(*entry);
7730
7731                         map += frag_len;
7732                 }
7733
7734                 if (len) {
7735                         if (*budget) {
7736                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7737                                               len, flags, mss, vlan);
7738                                 *budget -= 1;
7739                                 *entry = NEXT_TX(*entry);
7740                         } else {
7741                                 hwbug = true;
7742                                 tnapi->tx_buffers[prvidx].fragmented = false;
7743                         }
7744                 }
7745         } else {
7746                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7747                               len, flags, mss, vlan);
7748                 *entry = NEXT_TX(*entry);
7749         }
7750
7751         return hwbug;
7752 }
7753
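/* Unmap the head and all fragments of the tx skb that starts at
 * @entry.  'fragmented' markers left behind by tg3_tx_frag_set()
 * denote extra BDs carved from a single DMA mapping; they are
 * skipped here so each mapping is only unmapped once.
 */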
7754 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7755 {
7756         int i;
7757         struct sk_buff *skb;
7758         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7759
7760         skb = txb->skb;
7761         txb->skb = NULL;
7762
7763         pci_unmap_single(tnapi->tp->pdev,
7764                          dma_unmap_addr(txb, mapping),
7765                          skb_headlen(skb),
7766                          PCI_DMA_TODEVICE);
7767
7768         while (txb->fragmented) {
7769                 txb->fragmented = false;
7770                 entry = NEXT_TX(entry);
7771                 txb = &tnapi->tx_buffers[entry];
7772         }
7773
7774         for (i = 0; i <= last; i++) {
7775                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7776
7777                 entry = NEXT_TX(entry);
7778                 txb = &tnapi->tx_buffers[entry];
7779
7780                 pci_unmap_page(tnapi->tp->pdev,
7781                                dma_unmap_addr(txb, mapping),
7782                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7783
7784                 while (txb->fragmented) {
7785                         txb->fragmented = false;
7786                         entry = NEXT_TX(entry);
7787                         txb = &tnapi->tx_buffers[entry];
7788                 }
7789         }
7790 }
7791
7792 /* Work around 4GB and 40-bit hardware DMA bugs. */
7793 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7794                                        struct sk_buff **pskb,
7795                                        u32 *entry, u32 *budget,
7796                                        u32 base_flags, u32 mss, u32 vlan)
7797 {
7798         struct tg3 *tp = tnapi->tp;
7799         struct sk_buff *new_skb, *skb = *pskb;
7800         dma_addr_t new_addr = 0;
7801         int ret = 0;
7802
7803         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7804                 new_skb = skb_copy(skb, GFP_ATOMIC);
7805         else {
7806                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7807
7808                 new_skb = skb_copy_expand(skb,
7809                                           skb_headroom(skb) + more_headroom,
7810                                           skb_tailroom(skb), GFP_ATOMIC);
7811         }
7812
7813         if (!new_skb) {
7814                 ret = -1;
7815         } else {
7816                 /* New SKB is guaranteed to be linear. */
7817                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7818                                           PCI_DMA_TODEVICE);
7819                 /* Make sure the mapping succeeded */
7820                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7821                         dev_kfree_skb_any(new_skb);
7822                         ret = -1;
7823                 } else {
7824                         u32 save_entry = *entry;
7825
7826                         base_flags |= TXD_FLAG_END;
7827
7828                         tnapi->tx_buffers[*entry].skb = new_skb;
7829                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7830                                            mapping, new_addr);
7831
7832                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7833                                             new_skb->len, base_flags,
7834                                             mss, vlan)) {
7835                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7836                                 dev_kfree_skb_any(new_skb);
7837                                 ret = -1;
7838                         }
7839                 }
7840         }
7841
7842         dev_consume_skb_any(skb);
7843         *pskb = new_skb;
7844         return ret;
7845 }
7846
7847 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7848 {
7849         /* Check if we will never have enough descriptors; gso_segs can
7850          * exceed the ring size (tg3_tso_bug() assumes three BDs per segment).
7851          */
7852         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7853 }
7854
7855 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7856
7857 /* Fall back to GSO for TSO packets that meet the HW bug conditions
7858  * flagged by tg3_tx_frag_set().
7859  */
7860 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7861                        struct netdev_queue *txq, struct sk_buff *skb)
7862 {
7863         struct sk_buff *segs, *nskb;
7864         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7865
7866         /* Estimate the number of fragments in the worst case */
7867         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7868                 netif_tx_stop_queue(txq);
7869
7870                 /* netif_tx_stop_queue() must be done before checking
7871                  * the tx index in tg3_tx_avail() below, because in
7872                  * tg3_tx(), we update tx index before checking for
7873                  * netif_tx_queue_stopped().
7874                  */
7875                 smp_mb();
7876                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7877                         return NETDEV_TX_BUSY;
7878
7879                 netif_tx_wake_queue(txq);
7880         }
7881
7882         segs = skb_gso_segment(skb, tp->dev->features &
7883                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7884         if (IS_ERR(segs) || !segs)
7885                 goto tg3_tso_bug_end;
7886
7887         do {
7888                 nskb = segs;
7889                 segs = segs->next;
7890                 nskb->next = NULL;
7891                 tg3_start_xmit(nskb, tp->dev);
7892         } while (segs);
7893
7894 tg3_tso_bug_end:
7895         dev_consume_skb_any(skb);
7896
7897         return NETDEV_TX_OK;
7898 }
7899
7900 /* hard_start_xmit for all devices */
7901 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7902 {
7903         struct tg3 *tp = netdev_priv(dev);
7904         u32 len, entry, base_flags, mss, vlan = 0;
7905         u32 budget;
7906         int i = -1, would_hit_hwbug;
7907         dma_addr_t mapping;
7908         struct tg3_napi *tnapi;
7909         struct netdev_queue *txq;
7910         unsigned int last;
7911         struct iphdr *iph = NULL;
7912         struct tcphdr *tcph = NULL;
7913         __sum16 tcp_csum = 0, ip_csum = 0;
7914         __be16 ip_tot_len = 0;
7915
7916         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7917         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7918         if (tg3_flag(tp, ENABLE_TSS))
7919                 tnapi++;
7920
7921         budget = tg3_tx_avail(tnapi);
7922
7923         /* We are running in BH disabled context with netif_tx_lock
7924          * and TX reclaim runs via tp->napi.poll inside of a software
7925          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7926          * no IRQ context deadlocks to worry about either.  Rejoice!
7927          */
7928         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7929                 if (!netif_tx_queue_stopped(txq)) {
7930                         netif_tx_stop_queue(txq);
7931
7932                         /* This is a hard error, log it. */
7933                         netdev_err(dev,
7934                                    "BUG! Tx Ring full when queue awake!\n");
7935                 }
7936                 return NETDEV_TX_BUSY;
7937         }
7938
7939         entry = tnapi->tx_prod;
7940         base_flags = 0;
7941
7942         mss = skb_shinfo(skb)->gso_size;
7943         if (mss) {
7944                 u32 tcp_opt_len, hdr_len;
7945
7946                 if (skb_cow_head(skb, 0))
7947                         goto drop;
7948
7949                 iph = ip_hdr(skb);
7950                 tcp_opt_len = tcp_optlen(skb);
7951
7952                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7953
7954                 /* HW/FW cannot correctly segment packets that have been
7955                  * vlan encapsulated.
7956                  */
7957                 if (skb->protocol == htons(ETH_P_8021Q) ||
7958                     skb->protocol == htons(ETH_P_8021AD)) {
7959                         if (tg3_tso_bug_gso_check(tnapi, skb))
7960                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7961                         goto drop;
7962                 }
7963
7964                 if (!skb_is_gso_v6(skb)) {
7965                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7966                             tg3_flag(tp, TSO_BUG)) {
7967                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7968                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7969                                 goto drop;
7970                         }
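                        /* Save the original IP header fields so they can be
                         * restored if we later fall back to GSO (see the
                         * would_hit_hwbug path below), then seed tot_len
                         * with the per-segment length the TSO engine uses.
                         */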
7971                         ip_csum = iph->check;
7972                         ip_tot_len = iph->tot_len;
7973                         iph->check = 0;
7974                         iph->tot_len = htons(mss + hdr_len);
7975                 }
7976
7977                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7978                                TXD_FLAG_CPU_POST_DMA);
7979
7980                 tcph = tcp_hdr(skb);
7981                 tcp_csum = tcph->check;
7982
7983                 if (tg3_flag(tp, HW_TSO_1) ||
7984                     tg3_flag(tp, HW_TSO_2) ||
7985                     tg3_flag(tp, HW_TSO_3)) {
7986                         tcph->check = 0;
7987                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7988                 } else {
7989                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7990                                                          0, IPPROTO_TCP, 0);
7991                 }
7992
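                /* The (post-Ethernet) header length is folded into spare
                 * bits of the mss field and base_flags for the newer TSO
                 * engines; the exact bit layout is hardware-defined.
                 */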
7993                 if (tg3_flag(tp, HW_TSO_3)) {
7994                         mss |= (hdr_len & 0xc) << 12;
7995                         if (hdr_len & 0x10)
7996                                 base_flags |= 0x00000010;
7997                         base_flags |= (hdr_len & 0x3e0) << 5;
7998                 } else if (tg3_flag(tp, HW_TSO_2))
7999                         mss |= hdr_len << 9;
8000                 else if (tg3_flag(tp, HW_TSO_1) ||
8001                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8002                         if (tcp_opt_len || iph->ihl > 5) {
8003                                 int tsflags;
8004
8005                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8006                                 mss |= (tsflags << 11);
8007                         }
8008                 } else {
8009                         if (tcp_opt_len || iph->ihl > 5) {
8010                                 int tsflags;
8011
8012                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8013                                 base_flags |= tsflags << 12;
8014                         }
8015                 }
8016         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8017                 /* HW/FW cannot correctly checksum packets that have been
8018                  * vlan encapsulated.
8019                  */
8020                 if (skb->protocol == htons(ETH_P_8021Q) ||
8021                     skb->protocol == htons(ETH_P_8021AD)) {
8022                         if (skb_checksum_help(skb))
8023                                 goto drop;
8024                 } else  {
8025                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8026                 }
8027         }
8028
8029         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8030             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8031                 base_flags |= TXD_FLAG_JMB_PKT;
8032
8033         if (skb_vlan_tag_present(skb)) {
8034                 base_flags |= TXD_FLAG_VLAN;
8035                 vlan = skb_vlan_tag_get(skb);
8036         }
8037
8038         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8039             tg3_flag(tp, TX_TSTAMP_EN)) {
8040                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8041                 base_flags |= TXD_FLAG_HWTSTAMP;
8042         }
8043
8044         len = skb_headlen(skb);
8045
8046         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8047         if (pci_dma_mapping_error(tp->pdev, mapping))
8048                 goto drop;
8049
8051         tnapi->tx_buffers[entry].skb = skb;
8052         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8053
8054         would_hit_hwbug = 0;
8055
8056         if (tg3_flag(tp, 5701_DMA_BUG))
8057                 would_hit_hwbug = 1;
8058
8059         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8060                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8061                             mss, vlan)) {
8062                 would_hit_hwbug = 1;
8063         } else if (skb_shinfo(skb)->nr_frags > 0) {
8064                 u32 tmp_mss = mss;
8065
8066                 if (!tg3_flag(tp, HW_TSO_1) &&
8067                     !tg3_flag(tp, HW_TSO_2) &&
8068                     !tg3_flag(tp, HW_TSO_3))
8069                         tmp_mss = 0;
8070
8071                 /* Now loop through additional data
8072                  * fragments, and queue them.
8073                  */
8074                 last = skb_shinfo(skb)->nr_frags - 1;
8075                 for (i = 0; i <= last; i++) {
8076                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8077
8078                         len = skb_frag_size(frag);
8079                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8080                                                    len, DMA_TO_DEVICE);
8081
8082                         tnapi->tx_buffers[entry].skb = NULL;
8083                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8084                                            mapping);
8085                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8086                                 goto dma_error;
8087
8088                         if (!budget ||
8089                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8090                                             len, base_flags |
8091                                             ((i == last) ? TXD_FLAG_END : 0),
8092                                             tmp_mss, vlan)) {
8093                                 would_hit_hwbug = 1;
8094                                 break;
8095                         }
8096                 }
8097         }
8098
8099         if (would_hit_hwbug) {
8100                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8101
8102                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8103                         /* If it's a TSO packet, do GSO instead of
8104                          * allocating and copying to a large linear SKB
8105                          */
8106                         if (ip_tot_len) {
8107                                 iph->check = ip_csum;
8108                                 iph->tot_len = ip_tot_len;
8109                         }
8110                         tcph->check = tcp_csum;
8111                         return tg3_tso_bug(tp, tnapi, txq, skb);
8112                 }
8113
8114                 /* If the workaround fails due to memory/mapping
8115                  * failure, silently drop this packet.
8116                  */
8117                 entry = tnapi->tx_prod;
8118                 budget = tg3_tx_avail(tnapi);
8119                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8120                                                 base_flags, mss, vlan))
8121                         goto drop_nofree;
8122         }
8123
8124         skb_tx_timestamp(skb);
8125         netdev_tx_sent_queue(txq, skb->len);
8126
8127         /* Sync BD data before updating mailbox */
8128         wmb();
8129
8130         tnapi->tx_prod = entry;
8131         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8132                 netif_tx_stop_queue(txq);
8133
8134                 /* netif_tx_stop_queue() must be done before checking
8135                  * the tx index in tg3_tx_avail() below, because in
8136                  * tg3_tx(), we update tx index before checking for
8137                  * netif_tx_queue_stopped().
8138                  */
8139                 smp_mb();
8140                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8141                         netif_tx_wake_queue(txq);
8142         }
8143
8144         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8145                 /* Packets are ready, update Tx producer idx on card. */
8146                 tw32_tx_mbox(tnapi->prodmbox, entry);
8147                 mmiowb();
8148         }
8149
8150         return NETDEV_TX_OK;
8151
8152 dma_error:
8153         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8154         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8155 drop:
8156         dev_kfree_skb_any(skb);
8157 drop_nofree:
8158         tp->tx_dropped++;
8159         return NETDEV_TX_OK;
8160 }
8161
8162 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8163 {
8164         if (enable) {
8165                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8166                                   MAC_MODE_PORT_MODE_MASK);
8167
8168                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8169
8170                 if (!tg3_flag(tp, 5705_PLUS))
8171                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8172
8173                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8174                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8175                 else
8176                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8177         } else {
8178                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8179
8180                 if (tg3_flag(tp, 5705_PLUS) ||
8181                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8182                     tg3_asic_rev(tp) == ASIC_REV_5700)
8183                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8184         }
8185
8186         tw32(MAC_MODE, tp->mac_mode);
8187         udelay(40);
8188 }
8189
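/* Put the PHY into loopback at the requested speed.  Internal
 * loopback sets BMCR_LOOPBACK directly; external loopback (extlpbk)
 * instead forces 1000BASE-T master mode (or the FET test trim) so a
 * physical loop can carry the frames.  The MAC port mode is then
 * matched to the selected speed.
 */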
8190 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8191 {
8192         u32 val, bmcr, mac_mode, ptest = 0;
8193
8194         tg3_phy_toggle_apd(tp, false);
8195         tg3_phy_toggle_automdix(tp, false);
8196
8197         if (extlpbk && tg3_phy_set_extloopbk(tp))
8198                 return -EIO;
8199
8200         bmcr = BMCR_FULLDPLX;
8201         switch (speed) {
8202         case SPEED_10:
8203                 break;
8204         case SPEED_100:
8205                 bmcr |= BMCR_SPEED100;
8206                 break;
8207         case SPEED_1000:
8208         default:
8209                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8210                         speed = SPEED_100;
8211                         bmcr |= BMCR_SPEED100;
8212                 } else {
8213                         speed = SPEED_1000;
8214                         bmcr |= BMCR_SPEED1000;
8215                 }
8216         }
8217
8218         if (extlpbk) {
8219                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8220                         tg3_readphy(tp, MII_CTRL1000, &val);
8221                         val |= CTL1000_AS_MASTER |
8222                                CTL1000_ENABLE_MASTER;
8223                         tg3_writephy(tp, MII_CTRL1000, val);
8224                 } else {
8225                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8226                                 MII_TG3_FET_PTEST_TRIM_2;
8227                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8228                 }
8229         } else
8230                 bmcr |= BMCR_LOOPBACK;
8231
8232         tg3_writephy(tp, MII_BMCR, bmcr);
8233
8234         /* The write needs to be flushed for the FETs */
8235         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8236                 tg3_readphy(tp, MII_BMCR, &bmcr);
8237
8238         udelay(40);
8239
8240         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8241             tg3_asic_rev(tp) == ASIC_REV_5785) {
8242                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8243                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8244                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8245
8246                 /* The write needs to be flushed for the AC131 */
8247                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8248         }
8249
8250         /* Reset to prevent losing 1st rx packet intermittently */
8251         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8252             tg3_flag(tp, 5780_CLASS)) {
8253                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8254                 udelay(10);
8255                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8256         }
8257
8258         mac_mode = tp->mac_mode &
8259                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8260         if (speed == SPEED_1000)
8261                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8262         else
8263                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8264
8265         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8266                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8267
8268                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8269                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8270                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8271                         mac_mode |= MAC_MODE_LINK_POLARITY;
8272
8273                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8274                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8275         }
8276
8277         tw32(MAC_MODE, mac_mode);
8278         udelay(40);
8279
8280         return 0;
8281 }
8282
8283 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8284 {
8285         struct tg3 *tp = netdev_priv(dev);
8286
8287         if (features & NETIF_F_LOOPBACK) {
8288                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8289                         return;
8290
8291                 spin_lock_bh(&tp->lock);
8292                 tg3_mac_loopback(tp, true);
8293                 netif_carrier_on(tp->dev);
8294                 spin_unlock_bh(&tp->lock);
8295                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8296         } else {
8297                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8298                         return;
8299
8300                 spin_lock_bh(&tp->lock);
8301                 tg3_mac_loopback(tp, false);
8302                 /* Force link status check */
8303                 tg3_setup_phy(tp, true);
8304                 spin_unlock_bh(&tp->lock);
8305                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8306         }
8307 }
8308
8309 static netdev_features_t tg3_fix_features(struct net_device *dev,
8310         netdev_features_t features)
8311 {
8312         struct tg3 *tp = netdev_priv(dev);
8313
8314         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8315                 features &= ~NETIF_F_ALL_TSO;
8316
8317         return features;
8318 }
8319
8320 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8321 {
8322         netdev_features_t changed = dev->features ^ features;
8323
8324         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8325                 tg3_set_loopback(dev, features);
8326
8327         return 0;
8328 }
8329
8330 static void tg3_rx_prodring_free(struct tg3 *tp,
8331                                  struct tg3_rx_prodring_set *tpr)
8332 {
8333         int i;
8334
8335         if (tpr != &tp->napi[0].prodring) {
8336                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8337                      i = (i + 1) & tp->rx_std_ring_mask)
8338                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8339                                         tp->rx_pkt_map_sz);
8340
8341                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8342                         for (i = tpr->rx_jmb_cons_idx;
8343                              i != tpr->rx_jmb_prod_idx;
8344                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8345                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8346                                                 TG3_RX_JMB_MAP_SZ);
8347                         }
8348                 }
8349
8350                 return;
8351         }
8352
8353         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8354                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8355                                 tp->rx_pkt_map_sz);
8356
8357         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8358                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8359                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8360                                         TG3_RX_JMB_MAP_SZ);
8361         }
8362 }
8363
8364 /* Initialize rx rings for packet processing.
8365  *
8366  * The chip has been shut down and the driver detached from
8367  * the networking stack, so no interrupts or new tx packets will
8368  * end up in the driver.  tp->{tx,}lock are held and thus
8369  * we may not sleep.
8370  */
8371 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8372                                  struct tg3_rx_prodring_set *tpr)
8373 {
8374         u32 i, rx_pkt_dma_sz;
8375
8376         tpr->rx_std_cons_idx = 0;
8377         tpr->rx_std_prod_idx = 0;
8378         tpr->rx_jmb_cons_idx = 0;
8379         tpr->rx_jmb_prod_idx = 0;
8380
8381         if (tpr != &tp->napi[0].prodring) {
8382                 memset(&tpr->rx_std_buffers[0], 0,
8383                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8384                 if (tpr->rx_jmb_buffers)
8385                         memset(&tpr->rx_jmb_buffers[0], 0,
8386                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8387                 goto done;
8388         }
8389
8390         /* Zero out all descriptors. */
8391         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8392
8393         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8394         if (tg3_flag(tp, 5780_CLASS) &&
8395             tp->dev->mtu > ETH_DATA_LEN)
8396                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8397         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8398
8399         /* Initialize invariants of the rings; we only set this
8400          * stuff once.  This works because the card does not
8401          * write into the rx buffer posting rings.
8402          */
8403         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8404                 struct tg3_rx_buffer_desc *rxd;
8405
8406                 rxd = &tpr->rx_std[i];
8407                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8408                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8409                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8410                                (i << RXD_OPAQUE_INDEX_SHIFT));
8411         }
8412
8413         /* Now allocate fresh rx data buffers for the standard ring. */
8414         for (i = 0; i < tp->rx_pending; i++) {
8415                 unsigned int frag_size;
8416
8417                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8418                                       &frag_size) < 0) {
8419                         netdev_warn(tp->dev,
8420                                     "Using a smaller RX standard ring. Only "
8421                                     "%d out of %d buffers were allocated "
8422                                     "successfully\n", i, tp->rx_pending);
8423                         if (i == 0)
8424                                 goto initfail;
8425                         tp->rx_pending = i;
8426                         break;
8427                 }
8428         }
8429
8430         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8431                 goto done;
8432
8433         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8434
8435         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8436                 goto done;
8437
8438         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8439                 struct tg3_rx_buffer_desc *rxd;
8440
8441                 rxd = &tpr->rx_jmb[i].std;
8442                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8443                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8444                                   RXD_FLAG_JUMBO;
8445                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8446                        (i << RXD_OPAQUE_INDEX_SHIFT));
8447         }
8448
8449         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8450                 unsigned int frag_size;
8451
8452                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8453                                       &frag_size) < 0) {
8454                         netdev_warn(tp->dev,
8455                                     "Using a smaller RX jumbo ring. Only %d "
8456                                     "out of %d buffers were allocated "
8457                                     "successfully\n", i, tp->rx_jumbo_pending);
8458                         if (i == 0)
8459                                 goto initfail;
8460                         tp->rx_jumbo_pending = i;
8461                         break;
8462                 }
8463         }
8464
8465 done:
8466         return 0;
8467
8468 initfail:
8469         tg3_rx_prodring_free(tp, tpr);
8470         return -ENOMEM;
8471 }
8472
8473 static void tg3_rx_prodring_fini(struct tg3 *tp,
8474                                  struct tg3_rx_prodring_set *tpr)
8475 {
8476         kfree(tpr->rx_std_buffers);
8477         tpr->rx_std_buffers = NULL;
8478         kfree(tpr->rx_jmb_buffers);
8479         tpr->rx_jmb_buffers = NULL;
8480         if (tpr->rx_std) {
8481                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8482                                   tpr->rx_std, tpr->rx_std_mapping);
8483                 tpr->rx_std = NULL;
8484         }
8485         if (tpr->rx_jmb) {
8486                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8487                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8488                 tpr->rx_jmb = NULL;
8489         }
8490 }
8491
8492 static int tg3_rx_prodring_init(struct tg3 *tp,
8493                                 struct tg3_rx_prodring_set *tpr)
8494 {
8495         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8496                                       GFP_KERNEL);
8497         if (!tpr->rx_std_buffers)
8498                 return -ENOMEM;
8499
8500         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8501                                          TG3_RX_STD_RING_BYTES(tp),
8502                                          &tpr->rx_std_mapping,
8503                                          GFP_KERNEL);
8504         if (!tpr->rx_std)
8505                 goto err_out;
8506
8507         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8508                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8509                                               GFP_KERNEL);
8510                 if (!tpr->rx_jmb_buffers)
8511                         goto err_out;
8512
8513                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8514                                                  TG3_RX_JMB_RING_BYTES(tp),
8515                                                  &tpr->rx_jmb_mapping,
8516                                                  GFP_KERNEL);
8517                 if (!tpr->rx_jmb)
8518                         goto err_out;
8519         }
8520
8521         return 0;
8522
8523 err_out:
8524         tg3_rx_prodring_fini(tp, tpr);
8525         return -ENOMEM;
8526 }
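
/* The init/fini pair above relies on a common idiom: every failure
 * path funnels into one teardown routine, which is safe to call on a
 * partially constructed set because each pointer is NULLed after it
 * is freed and unset pointers are either skipped or harmlessly freed.
 * A minimal stand-alone sketch of the same idiom -- hypothetical
 * names, plain libc, not driver code:
 */
#include <stdlib.h>

struct two_bufs {
	void *a;
	void *b;
};

/* Safe on any partially initialized state: free(NULL) is a no-op. */
static void two_bufs_fini(struct two_bufs *t)
{
	free(t->a);
	t->a = NULL;
	free(t->b);
	t->b = NULL;
}

static int two_bufs_init(struct two_bufs *t, size_t na, size_t nb)
{
	t->a = NULL;
	t->b = NULL;

	t->a = calloc(1, na);
	if (!t->a)
		goto err_out;
	t->b = calloc(1, nb);
	if (!t->b)
		goto err_out;
	return 0;

err_out:
	two_bufs_fini(t);	/* tolerates the half-built state */
	return -1;
}

int main(void)
{
	struct two_bufs t;
	int err = two_bufs_init(&t, 64, 64);

	two_bufs_fini(&t);	/* safe even if init already failed */
	return err;
}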
8527
8528 /* Free up pending packets in all rx/tx rings.
8529  *
8530  * The chip has been shut down and the driver detached from
8531  * the networking stack, so no interrupts or new tx packets will
8532  * end up in the driver.  tp->{tx,}lock is not held and we are not
8533  * in an interrupt context and thus may sleep.
8534  */
8535 static void tg3_free_rings(struct tg3 *tp)
8536 {
8537         int i, j;
8538
8539         for (j = 0; j < tp->irq_cnt; j++) {
8540                 struct tg3_napi *tnapi = &tp->napi[j];
8541
8542                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8543
8544                 if (!tnapi->tx_buffers)
8545                         continue;
8546
8547                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8548                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8549
8550                         if (!skb)
8551                                 continue;
8552
8553                         tg3_tx_skb_unmap(tnapi, i,
8554                                          skb_shinfo(skb)->nr_frags - 1);
8555
8556                         dev_consume_skb_any(skb);
8557                 }
8558                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8559         }
8560 }
8561
8562 /* Initialize tx/rx rings for packet processing.
8563  *
8564  * The chip has been shut down and the driver detached from
8565  * the networking stack, so no interrupts or new tx packets will
8566  * end up in the driver.  tp->{tx,}lock are held and thus
8567  * we may not sleep.
8568  */
8569 static int tg3_init_rings(struct tg3 *tp)
8570 {
8571         int i;
8572
8573         /* Free up all the SKBs. */
8574         tg3_free_rings(tp);
8575
8576         for (i = 0; i < tp->irq_cnt; i++) {
8577                 struct tg3_napi *tnapi = &tp->napi[i];
8578
8579                 tnapi->last_tag = 0;
8580                 tnapi->last_irq_tag = 0;
8581                 tnapi->hw_status->status = 0;
8582                 tnapi->hw_status->status_tag = 0;
8583                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8584
8585                 tnapi->tx_prod = 0;
8586                 tnapi->tx_cons = 0;
8587                 if (tnapi->tx_ring)
8588                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8589
8590                 tnapi->rx_rcb_ptr = 0;
8591                 if (tnapi->rx_rcb)
8592                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8593
8594                 if (tnapi->prodring.rx_std &&
8595                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8596                         tg3_free_rings(tp);
8597                         return -ENOMEM;
8598                 }
8599         }
8600
8601         return 0;
8602 }
8603
8604 static void tg3_mem_tx_release(struct tg3 *tp)
8605 {
8606         int i;
8607
8608         for (i = 0; i < tp->irq_max; i++) {
8609                 struct tg3_napi *tnapi = &tp->napi[i];
8610
8611                 if (tnapi->tx_ring) {
8612                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8613                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8614                         tnapi->tx_ring = NULL;
8615                 }
8616
8617                 kfree(tnapi->tx_buffers);
8618                 tnapi->tx_buffers = NULL;
8619         }
8620 }
8621
8622 static int tg3_mem_tx_acquire(struct tg3 *tp)
8623 {
8624         int i;
8625         struct tg3_napi *tnapi = &tp->napi[0];
8626
8627         /* If multivector TSS is enabled, vector 0 does not handle
8628          * tx interrupts.  Don't allocate any resources for it.
8629          */
8630         if (tg3_flag(tp, ENABLE_TSS))
8631                 tnapi++;
8632
8633         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8634                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8635                                             sizeof(struct tg3_tx_ring_info),
8636                                             GFP_KERNEL);
8637                 if (!tnapi->tx_buffers)
8638                         goto err_out;
8639
8640                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8641                                                     TG3_TX_RING_BYTES,
8642                                                     &tnapi->tx_desc_mapping,
8643                                                     GFP_KERNEL);
8644                 if (!tnapi->tx_ring)
8645                         goto err_out;
8646         }
8647
8648         return 0;
8649
8650 err_out:
8651         tg3_mem_tx_release(tp);
8652         return -ENOMEM;
8653 }
8654
8655 static void tg3_mem_rx_release(struct tg3 *tp)
8656 {
8657         int i;
8658
8659         for (i = 0; i < tp->irq_max; i++) {
8660                 struct tg3_napi *tnapi = &tp->napi[i];
8661
8662                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8663
8664                 if (!tnapi->rx_rcb)
8665                         continue;
8666
8667                 dma_free_coherent(&tp->pdev->dev,
8668                                   TG3_RX_RCB_RING_BYTES(tp),
8669                                   tnapi->rx_rcb,
8670                                   tnapi->rx_rcb_mapping);
8671                 tnapi->rx_rcb = NULL;
8672         }
8673 }
8674
8675 static int tg3_mem_rx_acquire(struct tg3 *tp)
8676 {
8677         unsigned int i, limit;
8678
8679         limit = tp->rxq_cnt;
8680
8681         /* If RSS is enabled, we need a (dummy) producer ring
8682          * set on vector zero.  This is the true hw prodring.
8683          */
8684         if (tg3_flag(tp, ENABLE_RSS))
8685                 limit++;
8686
8687         for (i = 0; i < limit; i++) {
8688                 struct tg3_napi *tnapi = &tp->napi[i];
8689
8690                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8691                         goto err_out;
8692
8693                 /* If multivector RSS is enabled, vector 0
8694                  * does not handle rx or tx interrupts.
8695                  * Don't allocate any resources for it.
8696                  */
8697                 if (!i && tg3_flag(tp, ENABLE_RSS))
8698                         continue;
8699
8700                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8701                                                     TG3_RX_RCB_RING_BYTES(tp),
8702                                                     &tnapi->rx_rcb_mapping,
8703                                                     GFP_KERNEL);
8704                 if (!tnapi->rx_rcb)
8705                         goto err_out;
8706         }
8707
8708         return 0;
8709
8710 err_out:
8711         tg3_mem_rx_release(tp);
8712         return -ENOMEM;
8713 }
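
/* Taken together with tg3_mem_tx_acquire() above, the per-vector
 * resource layout is: under RSS, vector 0 owns only the true hw
 * producer ring set and no rx return ring; under TSS, vector 0 owns
 * no tx ring either.  A small user-space sketch of that layout --
 * hypothetical helper, illustrative only:
 */
#include <stdio.h>

static void print_vector_layout(int rxq_cnt, int txq_cnt, int rss, int tss)
{
	int rx_limit = rxq_cnt + (rss ? 1 : 0);	/* prodring sets */
	int tx_limit = txq_cnt + (tss ? 1 : 0);
	int nvec = rx_limit > tx_limit ? rx_limit : tx_limit;
	int i;

	for (i = 0; i < nvec; i++) {
		int has_prodring = i < rx_limit;
		/* vector 0 gets no rx return ring when RSS is on */
		int has_rcb = has_prodring && !(rss && i == 0);
		/* with TSS, tx rings start at vector 1 */
		int has_tx = tss ? (i >= 1 && i < tx_limit) : (i < txq_cnt);

		printf("vec %d: prodring=%d rx_rcb=%d tx_ring=%d\n",
		       i, has_prodring, has_rcb, has_tx);
	}
}

int main(void)
{
	print_vector_layout(4, 4, 1, 1);	/* 4 rx + 4 tx queues */
	return 0;
}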
8714
8715 /*
8716  * Must only be invoked with interrupt sources disabled and
8717  * the hardware shut down.
8718  */
8719 static void tg3_free_consistent(struct tg3 *tp)
8720 {
8721         int i;
8722
8723         for (i = 0; i < tp->irq_cnt; i++) {
8724                 struct tg3_napi *tnapi = &tp->napi[i];
8725
8726                 if (tnapi->hw_status) {
8727                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8728                                           tnapi->hw_status,
8729                                           tnapi->status_mapping);
8730                         tnapi->hw_status = NULL;
8731                 }
8732         }
8733
8734         tg3_mem_rx_release(tp);
8735         tg3_mem_tx_release(tp);
8736
8737         /* tp->hw_stats can be referenced safely:
8738          *     1. under rtnl_lock
8739          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8740          */
8741         if (tp->hw_stats) {
8742                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8743                                   tp->hw_stats, tp->stats_mapping);
8744                 tp->hw_stats = NULL;
8745         }
8746 }
8747
8748 /*
8749  * Must only be invoked with interrupt sources disabled and
8750  * the hardware shut down.  Can sleep.
8751  */
8752 static int tg3_alloc_consistent(struct tg3 *tp)
8753 {
8754         int i;
8755
8756         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8757                                            sizeof(struct tg3_hw_stats),
8758                                            &tp->stats_mapping, GFP_KERNEL);
8759         if (!tp->hw_stats)
8760                 goto err_out;
8761
8762         for (i = 0; i < tp->irq_cnt; i++) {
8763                 struct tg3_napi *tnapi = &tp->napi[i];
8764                 struct tg3_hw_status *sblk;
8765
8766                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8767                                                        TG3_HW_STATUS_SIZE,
8768                                                        &tnapi->status_mapping,
8769                                                        GFP_KERNEL);
8770                 if (!tnapi->hw_status)
8771                         goto err_out;
8772
8773                 sblk = tnapi->hw_status;
8774
8775                 if (tg3_flag(tp, ENABLE_RSS)) {
8776                         u16 *prodptr = NULL;
8777
8778                         /*
8779                          * When RSS is enabled, the status block format changes
8780                          * slightly.  The "rx_jumbo_consumer", "reserved",
8781                          * and "rx_mini_consumer" members get mapped to the
8782                          * other three rx return ring producer indexes.
8783                          */
8784                         switch (i) {
8785                         case 1:
8786                                 prodptr = &sblk->idx[0].rx_producer;
8787                                 break;
8788                         case 2:
8789                                 prodptr = &sblk->rx_jumbo_consumer;
8790                                 break;
8791                         case 3:
8792                                 prodptr = &sblk->reserved;
8793                                 break;
8794                         case 4:
8795                                 prodptr = &sblk->rx_mini_consumer;
8796                                 break;
8797                         }
8798                         tnapi->rx_rcb_prod_idx = prodptr;
8799                 } else {
8800                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8801                 }
8802         }
8803
8804         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8805                 goto err_out;
8806
8807         return 0;
8808
8809 err_out:
8810         tg3_free_consistent(tp);
8811         return -ENOMEM;
8812 }
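
/* The switch above depends on the RSS status block layout: the four
 * rx return ring producer indices arrive through the fields that the
 * non-RSS layout calls rx_producer, rx_jumbo_consumer, reserved and
 * rx_mini_consumer.  A compact stand-alone model of that aliasing --
 * simplified struct, not the real tg3_hw_status:
 */
#include <stdint.h>
#include <stddef.h>

struct fake_status_block {
	uint16_t rx_producer;		/* ret ring 1 producer under RSS */
	uint16_t rx_jumbo_consumer;	/* ret ring 2 producer under RSS */
	uint16_t reserved;		/* ret ring 3 producer under RSS */
	uint16_t rx_mini_consumer;	/* ret ring 4 producer under RSS */
};

/* Map an RSS vector number (1..4) to its producer index pointer,
 * mirroring the switch in tg3_alloc_consistent().
 */
static uint16_t *rss_prod_ptr(struct fake_status_block *sblk, int vec)
{
	switch (vec) {
	case 1: return &sblk->rx_producer;
	case 2: return &sblk->rx_jumbo_consumer;
	case 3: return &sblk->reserved;
	case 4: return &sblk->rx_mini_consumer;
	default: return NULL;	/* vector 0 handles no rx under RSS */
	}
}

int main(void)
{
	struct fake_status_block sblk = { 0 };

	*rss_prod_ptr(&sblk, 2) = 5;	/* vector 2's producer index */
	return sblk.rx_jumbo_consumer == 5 ? 0 : 1;
}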
8813
8814 #define MAX_WAIT_CNT 1000
8815
8816 /* To stop a block, clear the enable bit and poll till it
8817  * clears.  tp->lock is held.
8818  */
8819 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8820 {
8821         unsigned int i;
8822         u32 val;
8823
8824         if (tg3_flag(tp, 5705_PLUS)) {
8825                 switch (ofs) {
8826                 case RCVLSC_MODE:
8827                 case DMAC_MODE:
8828                 case MBFREE_MODE:
8829                 case BUFMGR_MODE:
8830                 case MEMARB_MODE:
8831                         /* We can't enable/disable these bits on the
8832                          * 5705/5750, so just report success.
8833                          */
8834                         return 0;
8835
8836                 default:
8837                         break;
8838                 }
8839         }
8840
8841         val = tr32(ofs);
8842         val &= ~enable_bit;
8843         tw32_f(ofs, val);
8844
8845         for (i = 0; i < MAX_WAIT_CNT; i++) {
8846                 if (pci_channel_offline(tp->pdev)) {
8847                         dev_err(&tp->pdev->dev,
8848                                 "tg3_stop_block device offline, "
8849                                 "ofs=%lx enable_bit=%x\n",
8850                                 ofs, enable_bit);
8851                         return -ENODEV;
8852                 }
8853
8854                 udelay(100);
8855                 val = tr32(ofs);
8856                 if ((val & enable_bit) == 0)
8857                         break;
8858         }
8859
8860         if (i == MAX_WAIT_CNT && !silent) {
8861                 dev_err(&tp->pdev->dev,
8862                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8863                         ofs, enable_bit);
8864                 return -ENODEV;
8865         }
8866
8867         return 0;
8868 }
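
/* tg3_stop_block() is the classic "clear the enable bit, then poll
 * until the hardware acknowledges" loop.  Its shape, stripped of the
 * tg3 specifics and run against a simulated register -- a sketch, not
 * driver code (the real loop waits udelay(100) between polls):
 */
#include <stdint.h>

static uint32_t fake_reg;	/* simulated device register */

static uint32_t read_reg(void) { return fake_reg; }
static void write_reg(uint32_t v) { fake_reg = v; }

static int stop_block(uint32_t enable_bit, unsigned int max_polls)
{
	unsigned int i;

	/* Ask the block to stop... */
	write_reg(read_reg() & ~enable_bit);

	/* ...then give it a bounded number of polls to report that
	 * it has stopped.
	 */
	for (i = 0; i < max_polls; i++) {
		if (!(read_reg() & enable_bit))
			return 0;	/* block acknowledged the stop */
	}
	return -1;	/* timed out; tg3 reports this as -ENODEV */
}

int main(void)
{
	fake_reg = 0x2;			/* block running: enable bit set */
	return stop_block(0x2, 1000);	/* returns 0: stop acknowledged */
}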
8869
8870 /* tp->lock is held. */
8871 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8872 {
8873         int i, err;
8874
8875         tg3_disable_ints(tp);
8876
8877         if (pci_channel_offline(tp->pdev)) {
8878                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8879                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8880                 err = -ENODEV;
8881                 goto err_no_dev;
8882         }
8883
8884         tp->rx_mode &= ~RX_MODE_ENABLE;
8885         tw32_f(MAC_RX_MODE, tp->rx_mode);
8886         udelay(10);
8887
8888         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8889         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8890         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8891         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8892         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8893         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8894
8895         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8896         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8897         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8898         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8899         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8900         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8901         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8902
8903         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8904         tw32_f(MAC_MODE, tp->mac_mode);
8905         udelay(40);
8906
8907         tp->tx_mode &= ~TX_MODE_ENABLE;
8908         tw32_f(MAC_TX_MODE, tp->tx_mode);
8909
8910         for (i = 0; i < MAX_WAIT_CNT; i++) {
8911                 udelay(100);
8912                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8913                         break;
8914         }
8915         if (i >= MAX_WAIT_CNT) {
8916                 dev_err(&tp->pdev->dev,
8917                         "%s timed out, TX_MODE_ENABLE will not clear "
8918                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8919                 err |= -ENODEV;
8920         }
8921
8922         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8923         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8924         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8925
8926         tw32(FTQ_RESET, 0xffffffff);
8927         tw32(FTQ_RESET, 0x00000000);
8928
8929         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8930         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8931
8932 err_no_dev:
8933         for (i = 0; i < tp->irq_cnt; i++) {
8934                 struct tg3_napi *tnapi = &tp->napi[i];
8935                 if (tnapi->hw_status)
8936                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8937         }
8938
8939         return err;
8940 }
8941
8942 /* Save PCI command register before chip reset */
8943 static void tg3_save_pci_state(struct tg3 *tp)
8944 {
8945         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8946 }
8947
8948 /* Restore PCI state after chip reset */
8949 static void tg3_restore_pci_state(struct tg3 *tp)
8950 {
8951         u32 val;
8952
8953         /* Re-enable indirect register accesses. */
8954         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8955                                tp->misc_host_ctrl);
8956
8957         /* Set MAX PCI retry to zero. */
8958         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8959         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8960             tg3_flag(tp, PCIX_MODE))
8961                 val |= PCISTATE_RETRY_SAME_DMA;
8962         /* Allow reads and writes to the APE register and memory space. */
8963         if (tg3_flag(tp, ENABLE_APE))
8964                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8965                        PCISTATE_ALLOW_APE_SHMEM_WR |
8966                        PCISTATE_ALLOW_APE_PSPACE_WR;
8967         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8968
8969         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8970
8971         if (!tg3_flag(tp, PCI_EXPRESS)) {
8972                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8973                                       tp->pci_cacheline_sz);
8974                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8975                                       tp->pci_lat_timer);
8976         }
8977
8978         /* Make sure PCI-X relaxed ordering bit is clear. */
8979         if (tg3_flag(tp, PCIX_MODE)) {
8980                 u16 pcix_cmd;
8981
8982                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8983                                      &pcix_cmd);
8984                 pcix_cmd &= ~PCI_X_CMD_ERO;
8985                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8986                                       pcix_cmd);
8987         }
8988
8989         if (tg3_flag(tp, 5780_CLASS)) {
8990
8991                 /* Chip reset on 5780 will reset MSI enable bit,
8992                  * so we need to restore it.
8993                  */
8994                 if (tg3_flag(tp, USING_MSI)) {
8995                         u16 ctrl;
8996
8997                         pci_read_config_word(tp->pdev,
8998                                              tp->msi_cap + PCI_MSI_FLAGS,
8999                                              &ctrl);
9000                         pci_write_config_word(tp->pdev,
9001                                               tp->msi_cap + PCI_MSI_FLAGS,
9002                                               ctrl | PCI_MSI_FLAGS_ENABLE);
9003                         val = tr32(MSGINT_MODE);
9004                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9005                 }
9006         }
9007 }
9008
9009 static void tg3_override_clk(struct tg3 *tp)
9010 {
9011         u32 val;
9012
9013         switch (tg3_asic_rev(tp)) {
9014         case ASIC_REV_5717:
9015                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9016                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9017                      TG3_CPMU_MAC_ORIDE_ENABLE);
9018                 break;
9019
9020         case ASIC_REV_5719:
9021         case ASIC_REV_5720:
9022                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9023                 break;
9024
9025         default:
9026                 return;
9027         }
9028 }
9029
9030 static void tg3_restore_clk(struct tg3 *tp)
9031 {
9032         u32 val;
9033
9034         switch (tg3_asic_rev(tp)) {
9035         case ASIC_REV_5717:
9036                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9037                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9038                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9039                 break;
9040
9041         case ASIC_REV_5719:
9042         case ASIC_REV_5720:
9043                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9044                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9045                 break;
9046
9047         default:
9048                 return;
9049         }
9050 }
9051
9052 /* tp->lock is held. */
9053 static int tg3_chip_reset(struct tg3 *tp)
9054         __releases(tp->lock)
9055         __acquires(tp->lock)
9056 {
9057         u32 val;
9058         void (*write_op)(struct tg3 *, u32, u32);
9059         int i, err;
9060
9061         if (!pci_device_is_present(tp->pdev))
9062                 return -ENODEV;
9063
9064         tg3_nvram_lock(tp);
9065
9066         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9067
9068         /* No matching tg3_nvram_unlock() after this because
9069          * chip reset below will undo the nvram lock.
9070          */
9071         tp->nvram_lock_cnt = 0;
9072
9073         /* GRC_MISC_CFG core clock reset will clear the memory
9074          * enable bit in PCI register 4 and the MSI enable bit
9075          * on some chips, so we save relevant registers here.
9076          */
9077         tg3_save_pci_state(tp);
9078
9079         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9080             tg3_flag(tp, 5755_PLUS))
9081                 tw32(GRC_FASTBOOT_PC, 0);
9082
9083         /*
9084          * We must avoid the readl() that normally takes place.
9085          * It locks machines, causes machine checks, and other
9086          * fun things.  So, temporarily disable the 5701
9087          * hardware workaround, while we do the reset.
9088          */
9089         write_op = tp->write32;
9090         if (write_op == tg3_write_flush_reg32)
9091                 tp->write32 = tg3_write32;
9092
9093         /* Prevent the irq handler from reading or writing PCI registers
9094          * during chip reset when the memory enable bit in the PCI command
9095          * register may be cleared.  The chip does not generate interrupt
9096          * at this time, but the irq handler may still be called due to irq
9097          * sharing or irqpoll.
9098          */
9099         tg3_flag_set(tp, CHIP_RESETTING);
9100         for (i = 0; i < tp->irq_cnt; i++) {
9101                 struct tg3_napi *tnapi = &tp->napi[i];
9102                 if (tnapi->hw_status) {
9103                         tnapi->hw_status->status = 0;
9104                         tnapi->hw_status->status_tag = 0;
9105                 }
9106                 tnapi->last_tag = 0;
9107                 tnapi->last_irq_tag = 0;
9108         }
9109         smp_mb();
9110
9111         tg3_full_unlock(tp);
9112
9113         for (i = 0; i < tp->irq_cnt; i++)
9114                 synchronize_irq(tp->napi[i].irq_vec);
9115
9116         tg3_full_lock(tp, 0);
9117
9118         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9119                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9120                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9121         }
9122
9123         /* do the reset */
9124         val = GRC_MISC_CFG_CORECLK_RESET;
9125
9126         if (tg3_flag(tp, PCI_EXPRESS)) {
9127                 /* Force PCIe 1.0a mode */
9128                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9129                     !tg3_flag(tp, 57765_PLUS) &&
9130                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9131                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9132                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9133
9134                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9135                         tw32(GRC_MISC_CFG, (1 << 29));
9136                         val |= (1 << 29);
9137                 }
9138         }
9139
9140         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9141                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9142                 tw32(GRC_VCPU_EXT_CTRL,
9143                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9144         }
9145
9146         /* Set the clock to the highest frequency to avoid timeouts. With link
9147          * aware mode, the clock speed could be slow and bootcode does not
9148          * complete within the expected time. Override the clock to allow the
9149          * bootcode to finish sooner and then restore it.
9150          */
9151         tg3_override_clk(tp);
9152
9153         /* Manage gphy power for all CPMU absent PCIe devices. */
9154         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9155                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9156
9157         tw32(GRC_MISC_CFG, val);
9158
9159         /* restore 5701 hardware bug workaround write method */
9160         tp->write32 = write_op;
9161
9162         /* Unfortunately, we have to delay before the PCI read back.
9163          * Some 575X chips even will not respond to a PCI cfg access
9164          * when the reset command is given to the chip.
9165          *
9166          * How do these hardware designers expect things to work
9167          * properly if the PCI write is posted for a long period
9168          * of time?  It is always necessary to have some method by
9169          * which a register read back can occur to push the write
9170          * out which does the reset.
9171          *
9172          * For most tg3 variants the trick below has worked.
9173          * Ho hum...
9174          */
9175         udelay(120);
9176
9177         /* Flush PCI posted writes.  The normal MMIO registers
9178          * are inaccessible at this time so this is the only
9179          * way to do this reliably (actually, this is no longer
9180          * the case, see above).  I tried to use indirect
9181          * register read/write but this upset some 5701 variants.
9182          */
9183         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9184
9185         udelay(120);
9186
9187         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9188                 u16 val16;
9189
9190                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9191                         int j;
9192                         u32 cfg_val;
9193
9194                         /* Wait for link training to complete.  */
9195                         for (j = 0; j < 5000; j++)
9196                                 udelay(100);
9197
9198                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9199                         pci_write_config_dword(tp->pdev, 0xc4,
9200                                                cfg_val | (1 << 15));
9201                 }
9202
9203                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9204                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9205                 /*
9206                  * Older PCIe devices only support the 128 byte
9207                  * MPS setting.  Enforce the restriction.
9208                  */
9209                 if (!tg3_flag(tp, CPMU_PRESENT))
9210                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9211                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9212
9213                 /* Clear error status */
9214                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9215                                       PCI_EXP_DEVSTA_CED |
9216                                       PCI_EXP_DEVSTA_NFED |
9217                                       PCI_EXP_DEVSTA_FED |
9218                                       PCI_EXP_DEVSTA_URD);
9219         }
9220
9221         tg3_restore_pci_state(tp);
9222
9223         tg3_flag_clear(tp, CHIP_RESETTING);
9224         tg3_flag_clear(tp, ERROR_PROCESSED);
9225
9226         val = 0;
9227         if (tg3_flag(tp, 5780_CLASS))
9228                 val = tr32(MEMARB_MODE);
9229         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9230
9231         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9232                 tg3_stop_fw(tp);
9233                 tw32(0x5000, 0x400);
9234         }
9235
9236         if (tg3_flag(tp, IS_SSB_CORE)) {
9237                 /*
9238                  * BCM4785: To avoid problems caused by a potentially
9239                  * defective internal ROM, stop the Rx RISC CPU, which
9240                  * is not required for normal operation.
9241                  */
9242                 tg3_stop_fw(tp);
9243                 tg3_halt_cpu(tp, RX_CPU_BASE);
9244         }
9245
9246         err = tg3_poll_fw(tp);
9247         if (err)
9248                 return err;
9249
9250         tw32(GRC_MODE, tp->grc_mode);
9251
9252         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9253                 val = tr32(0xc4);
9254
9255                 tw32(0xc4, val | (1 << 15));
9256         }
9257
9258         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9259             tg3_asic_rev(tp) == ASIC_REV_5705) {
9260                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9261                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9262                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9263                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9264         }
9265
9266         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9267                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9268                 val = tp->mac_mode;
9269         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9270                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9271                 val = tp->mac_mode;
9272         } else
9273                 val = 0;
9274
9275         tw32_f(MAC_MODE, val);
9276         udelay(40);
9277
9278         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9279
9280         tg3_mdio_start(tp);
9281
9282         if (tg3_flag(tp, PCI_EXPRESS) &&
9283             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9284             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9285             !tg3_flag(tp, 57765_PLUS)) {
9286                 val = tr32(0x7c00);
9287
9288                 tw32(0x7c00, val | (1 << 25));
9289         }
9290
9291         tg3_restore_clk(tp);
9292
9293         /* Reprobe ASF enable state.  */
9294         tg3_flag_clear(tp, ENABLE_ASF);
9295         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9296                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9297
9298         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9299         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9300         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9301                 u32 nic_cfg;
9302
9303                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9304                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9305                         tg3_flag_set(tp, ENABLE_ASF);
9306                         tp->last_event_jiffies = jiffies;
9307                         if (tg3_flag(tp, 5750_PLUS))
9308                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9309
9310                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9311                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9312                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9313                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9314                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9315                 }
9316         }
9317
9318         return 0;
9319 }
9320
9321 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9322 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9323 static void __tg3_set_rx_mode(struct net_device *);
9324
9325 /* tp->lock is held. */
9326 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9327 {
9328         int err;
9329
9330         tg3_stop_fw(tp);
9331
9332         tg3_write_sig_pre_reset(tp, kind);
9333
9334         tg3_abort_hw(tp, silent);
9335         err = tg3_chip_reset(tp);
9336
9337         __tg3_set_mac_addr(tp, false);
9338
9339         tg3_write_sig_legacy(tp, kind);
9340         tg3_write_sig_post_reset(tp, kind);
9341
9342         if (tp->hw_stats) {
9343                 /* Save the stats across chip resets... */
9344                 tg3_get_nstats(tp, &tp->net_stats_prev);
9345                 tg3_get_estats(tp, &tp->estats_prev);
9346
9347                 /* And make sure the next sample is new data */
9348                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9349         }
9350
9351         return err;
9352 }
9353
9354 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9355 {
9356         struct tg3 *tp = netdev_priv(dev);
9357         struct sockaddr *addr = p;
9358         int err = 0;
9359         bool skip_mac_1 = false;
9360
9361         if (!is_valid_ether_addr(addr->sa_data))
9362                 return -EADDRNOTAVAIL;
9363
9364         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9365
9366         if (!netif_running(dev))
9367                 return 0;
9368
9369         if (tg3_flag(tp, ENABLE_ASF)) {
9370                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9371
9372                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9373                 addr0_low = tr32(MAC_ADDR_0_LOW);
9374                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9375                 addr1_low = tr32(MAC_ADDR_1_LOW);
9376
9377                 /* Skip MAC addr 1 if ASF is using it. */
9378                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9379                     !(addr1_high == 0 && addr1_low == 0))
9380                         skip_mac_1 = true;
9381         }
9382         spin_lock_bh(&tp->lock);
9383         __tg3_set_mac_addr(tp, skip_mac_1);
9384         __tg3_set_rx_mode(dev);
9385         spin_unlock_bh(&tp->lock);
9386
9387         return err;
9388 }
9389
9390 /* tp->lock is held. */
9391 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9392                            dma_addr_t mapping, u32 maxlen_flags,
9393                            u32 nic_addr)
9394 {
9395         tg3_write_mem(tp,
9396                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9397                       ((u64) mapping >> 32));
9398         tg3_write_mem(tp,
9399                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9400                       ((u64) mapping & 0xffffffff));
9401         tg3_write_mem(tp,
9402                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9403                        maxlen_flags);
9404
9405         if (!tg3_flag(tp, 5705_PLUS))
9406                 tg3_write_mem(tp,
9407                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9408                               nic_addr);
9409 }
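
/* tg3_set_bdinfo() programs one 64-bit DMA address through two 32-bit
 * SRAM words.  A tiny self-checking sketch of the split and
 * reassembly, with an illustrative bus address:
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t mapping = 0x0000001234abcd00ULL;	/* example address */
	uint32_t hi = (uint32_t)(mapping >> 32);	/* REG_HIGH word */
	uint32_t lo = (uint32_t)(mapping & 0xffffffff);	/* REG_LOW word */

	/* Reassembling the two words must give back the original. */
	assert((((uint64_t)hi << 32) | lo) == mapping);
	return 0;
}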
9410
9411
9412 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9413 {
9414         int i = 0;
9415
9416         if (!tg3_flag(tp, ENABLE_TSS)) {
9417                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9418                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9419                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9420         } else {
9421                 tw32(HOSTCC_TXCOL_TICKS, 0);
9422                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9423                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9424
9425                 for (; i < tp->txq_cnt; i++) {
9426                         u32 reg;
9427
9428                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9429                         tw32(reg, ec->tx_coalesce_usecs);
9430                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9431                         tw32(reg, ec->tx_max_coalesced_frames);
9432                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9433                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9434                 }
9435         }
9436
9437         for (; i < tp->irq_max - 1; i++) {
9438                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9439                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9440                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9441         }
9442 }
9443
9444 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9445 {
9446         int i = 0;
9447         u32 limit = tp->rxq_cnt;
9448
9449         if (!tg3_flag(tp, ENABLE_RSS)) {
9450                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9451                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9452                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9453                 limit--;
9454         } else {
9455                 tw32(HOSTCC_RXCOL_TICKS, 0);
9456                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9457                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9458         }
9459
9460         for (; i < limit; i++) {
9461                 u32 reg;
9462
9463                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9464                 tw32(reg, ec->rx_coalesce_usecs);
9465                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9466                 tw32(reg, ec->rx_max_coalesced_frames);
9467                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9468                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9469         }
9470
9471         for (; i < tp->irq_max - 1; i++) {
9472                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9473                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9474                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9475         }
9476 }
9477
9478 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9479 {
9480         tg3_coal_tx_init(tp, ec);
9481         tg3_coal_rx_init(tp, ec);
9482
9483         if (!tg3_flag(tp, 5705_PLUS)) {
9484                 u32 val = ec->stats_block_coalesce_usecs;
9485
9486                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9487                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9488
9489                 if (!tp->link_up)
9490                         val = 0;
9491
9492                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9493         }
9494 }
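
/* The "base + i * 0x18" arithmetic in the two helpers above works
 * because each MSI-X vector's coalescing registers sit in a 0x18-byte
 * block starting at the *_VEC1 addresses, so index i addresses vector
 * i + 1.  Sketch of the address math -- the base value below is
 * illustrative, not the real register offset:
 */
#include <stdio.h>

#define FAKE_RXCOL_TICKS_VEC1 0x3c10u	/* illustrative base only */

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("vector %u: rx ticks reg at 0x%04x\n",
		       i + 1, FAKE_RXCOL_TICKS_VEC1 + i * 0x18);
	return 0;
}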
9495
9496 /* tp->lock is held. */
9497 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9498 {
9499         u32 txrcb, limit;
9500
9501         /* Disable all transmit rings but the first. */
9502         if (!tg3_flag(tp, 5705_PLUS))
9503                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9504         else if (tg3_flag(tp, 5717_PLUS))
9505                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9506         else if (tg3_flag(tp, 57765_CLASS) ||
9507                  tg3_asic_rev(tp) == ASIC_REV_5762)
9508                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9509         else
9510                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9511
9512         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9513              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9514                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9515                               BDINFO_FLAGS_DISABLED);
9516 }
9517
9518 /* tp->lock is held. */
9519 static void tg3_tx_rcbs_init(struct tg3 *tp)
9520 {
9521         int i = 0;
9522         u32 txrcb = NIC_SRAM_SEND_RCB;
9523
9524         if (tg3_flag(tp, ENABLE_TSS))
9525                 i++;
9526
9527         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9528                 struct tg3_napi *tnapi = &tp->napi[i];
9529
9530                 if (!tnapi->tx_ring)
9531                         continue;
9532
9533                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9534                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9535                                NIC_SRAM_TX_BUFFER_DESC);
9536         }
9537 }
9538
9539 /* tp->lock is held. */
9540 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9541 {
9542         u32 rxrcb, limit;
9543
9544         /* Disable all receive return rings but the first. */
9545         if (tg3_flag(tp, 5717_PLUS))
9546                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9547         else if (!tg3_flag(tp, 5705_PLUS))
9548                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9549         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9550                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9551                  tg3_flag(tp, 57765_CLASS))
9552                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9553         else
9554                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9555
9556         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9557              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9558                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9559                               BDINFO_FLAGS_DISABLED);
9560 }
9561
9562 /* tp->lock is held. */
9563 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9564 {
9565         int i = 0;
9566         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9567
9568         if (tg3_flag(tp, ENABLE_RSS))
9569                 i++;
9570
9571         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9572                 struct tg3_napi *tnapi = &tp->napi[i];
9573
9574                 if (!tnapi->rx_rcb)
9575                         continue;
9576
9577                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9578                                (tp->rx_ret_ring_mask + 1) <<
9579                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9580         }
9581 }
9582
9583 /* tp->lock is held. */
9584 static void tg3_rings_reset(struct tg3 *tp)
9585 {
9586         int i;
9587         u32 stblk;
9588         struct tg3_napi *tnapi = &tp->napi[0];
9589
9590         tg3_tx_rcbs_disable(tp);
9591
9592         tg3_rx_ret_rcbs_disable(tp);
9593
9594         /* Disable interrupts */
9595         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9596         tp->napi[0].chk_msi_cnt = 0;
9597         tp->napi[0].last_rx_cons = 0;
9598         tp->napi[0].last_tx_cons = 0;
9599
9600         /* Zero mailbox registers. */
9601         if (tg3_flag(tp, SUPPORT_MSIX)) {
9602                 for (i = 1; i < tp->irq_max; i++) {
9603                         tp->napi[i].tx_prod = 0;
9604                         tp->napi[i].tx_cons = 0;
9605                         if (tg3_flag(tp, ENABLE_TSS))
9606                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9607                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9608                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9609                         tp->napi[i].chk_msi_cnt = 0;
9610                         tp->napi[i].last_rx_cons = 0;
9611                         tp->napi[i].last_tx_cons = 0;
9612                 }
9613                 if (!tg3_flag(tp, ENABLE_TSS))
9614                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9615         } else {
9616                 tp->napi[0].tx_prod = 0;
9617                 tp->napi[0].tx_cons = 0;
9618                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9619                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9620         }
9621
9622         /* Make sure the NIC-based send BD rings are disabled. */
9623         if (!tg3_flag(tp, 5705_PLUS)) {
9624                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9625                 for (i = 0; i < 16; i++)
9626                         tw32_tx_mbox(mbox + i * 8, 0);
9627         }
9628
9629         /* Clear status block in ram. */
9630         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9631
9632         /* Set status block DMA address */
9633         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9634              ((u64) tnapi->status_mapping >> 32));
9635         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9636              ((u64) tnapi->status_mapping & 0xffffffff));
9637
9638         stblk = HOSTCC_STATBLCK_RING1;
9639
9640         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9641                 u64 mapping = (u64)tnapi->status_mapping;
9642                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9643                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9644                 stblk += 8;
9645
9646                 /* Clear status block in ram. */
9647                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9648         }
9649
9650         tg3_tx_rcbs_init(tp);
9651         tg3_rx_ret_rcbs_init(tp);
9652 }
9653
9654 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9655 {
9656         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9657
9658         if (!tg3_flag(tp, 5750_PLUS) ||
9659             tg3_flag(tp, 5780_CLASS) ||
9660             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9661             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9662             tg3_flag(tp, 57765_PLUS))
9663                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9664         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9665                  tg3_asic_rev(tp) == ASIC_REV_5787)
9666                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9667         else
9668                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9669
9670         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9671         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9672
9673         val = min(nic_rep_thresh, host_rep_thresh);
9674         tw32(RCVBDI_STD_THRESH, val);
9675
9676         if (tg3_flag(tp, 57765_PLUS))
9677                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9678
9679         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9680                 return;
9681
9682         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9683
9684         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9685
9686         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9687         tw32(RCVBDI_JUMBO_THRESH, val);
9688
9689         if (tg3_flag(tp, 57765_PLUS))
9690                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9691 }
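
/* The thresholds above reduce to a couple of min/max expressions.
 * A worked example with illustrative numbers (bdcache_maxcnt = 8,
 * rx_std_max_post = 32, rx_pending = 200):
 */
#include <stdio.h>

static unsigned int umin(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int umax(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int bdcache_maxcnt = 8, rx_std_max_post = 32;
	unsigned int rx_pending = 200;
	unsigned int nic_rep = umin(bdcache_maxcnt / 2, rx_std_max_post); /* 4 */
	unsigned int host_rep = umax(rx_pending / 8, 1);		  /* 25 */

	printf("RCVBDI_STD_THRESH = %u\n", umin(nic_rep, host_rep));	  /* 4 */
	return 0;
}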
9692
9693 static inline u32 calc_crc(unsigned char *buf, int len)
9694 {
9695         u32 reg;
9696         u32 tmp;
9697         int j, k;
9698
9699         reg = 0xffffffff;
9700
9701         for (j = 0; j < len; j++) {
9702                 reg ^= buf[j];
9703
9704                 for (k = 0; k < 8; k++) {
9705                         tmp = reg & 0x01;
9706
9707                         reg >>= 1;
9708
9709                         if (tmp)
9710                                 reg ^= 0xedb88320;
9711                 }
9712         }
9713
9714         return ~reg;
9715 }
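
/* calc_crc() is the standard bit-at-a-time reflected CRC-32
 * (polynomial 0xedb88320, init 0xffffffff, final complement), i.e. the
 * CRC used for the Ethernet FCS.  A stand-alone copy checked against
 * the well-known test vector CRC32("123456789") == 0xcbf43926:
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t crc32_ref(const unsigned char *buf, int len)
{
	uint32_t reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++)
			reg = (reg >> 1) ^ ((reg & 1) ? 0xedb88320 : 0);
	}
	return ~reg;
}

int main(void)
{
	const unsigned char *s = (const unsigned char *)"123456789";

	printf("%08x\n", crc32_ref(s, 9));	/* prints cbf43926 */
	return 0;
}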
9716
9717 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9718 {
9719         /* accept or reject all multicast frames */
9720         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9721         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9722         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9723         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9724 }
9725
9726 static void __tg3_set_rx_mode(struct net_device *dev)
9727 {
9728         struct tg3 *tp = netdev_priv(dev);
9729         u32 rx_mode;
9730
9731         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9732                                   RX_MODE_KEEP_VLAN_TAG);
9733
9734 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9735         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9736          * flag clear.
9737          */
9738         if (!tg3_flag(tp, ENABLE_ASF))
9739                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9740 #endif
9741
9742         if (dev->flags & IFF_PROMISC) {
9743                 /* Promiscuous mode. */
9744                 rx_mode |= RX_MODE_PROMISC;
9745         } else if (dev->flags & IFF_ALLMULTI) {
9746                 /* Accept all multicast. */
9747                 tg3_set_multi(tp, 1);
9748         } else if (netdev_mc_empty(dev)) {
9749                 /* Reject all multicast. */
9750                 tg3_set_multi(tp, 0);
9751         } else {
9752                 /* Accept one or more multicast(s). */
9753                 struct netdev_hw_addr *ha;
9754                 u32 mc_filter[4] = { 0, };
9755                 u32 regidx;
9756                 u32 bit;
9757                 u32 crc;
9758
9759                 netdev_for_each_mc_addr(ha, dev) {
9760                         crc = calc_crc(ha->addr, ETH_ALEN);
9761                         bit = ~crc & 0x7f;
9762                         regidx = (bit & 0x60) >> 5;
9763                         bit &= 0x1f;
9764                         mc_filter[regidx] |= (1 << bit);
9765                 }
9766
9767                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9768                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9769                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9770                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9771         }
9772
9773         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9774                 rx_mode |= RX_MODE_PROMISC;
9775         } else if (!(dev->flags & IFF_PROMISC)) {
9776                 /* Add all entries to the MAC addr filter list */
9777                 int i = 0;
9778                 struct netdev_hw_addr *ha;
9779
9780                 netdev_for_each_uc_addr(ha, dev) {
9781                         __tg3_set_one_mac_addr(tp, ha->addr,
9782                                                i + TG3_UCAST_ADDR_IDX(tp));
9783                         i++;
9784                 }
9785         }
9786
9787         if (rx_mode != tp->rx_mode) {
9788                 tp->rx_mode = rx_mode;
9789                 tw32_f(MAC_RX_MODE, rx_mode);
9790                 udelay(10);
9791         }
9792 }
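
/* The multicast branch above hashes each address with calc_crc() and
 * sets one of 128 bits spread across the four MAC_HASH_REG_*
 * registers: the complement of the low 7 CRC bits splits into a 2-bit
 * register index and a 5-bit bit position.  A sketch of just that
 * mapping -- the crc value below is illustrative:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t crc = 0x1a2b3c4d;	/* pretend calc_crc(addr, 6) result */
	uint32_t bit = ~crc & 0x7f;	/* hash bit index, 0..127 */
	uint32_t regidx = (bit & 0x60) >> 5;	/* which of 4 registers */

	bit &= 0x1f;			/* bit within that register */
	printf("set bit %u of MAC_HASH_REG_%u\n", bit, regidx);
	return 0;
}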
9793
9794 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9795 {
9796         int i;
9797
9798         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9799                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9800 }
9801
9802 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9803 {
9804         int i;
9805
9806         if (!tg3_flag(tp, SUPPORT_MSIX))
9807                 return;
9808
9809         if (tp->rxq_cnt == 1) {
9810                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9811                 return;
9812         }
9813
9814         /* Validate table against current IRQ count */
9815         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9816                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9817                         break;
9818         }
9819
9820         if (i != TG3_RSS_INDIR_TBL_SIZE)
9821                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9822 }
9823
9824 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9825 {
9826         int i = 0;
9827         u32 reg = MAC_RSS_INDIR_TBL_0;
9828
9829         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9830                 u32 val = tp->rss_ind_tbl[i];
9831                 i++;
9832                 for (; i % 8; i++) {
9833                         val <<= 4;
9834                         val |= tp->rss_ind_tbl[i];
9835                 }
9836                 tw32(reg, val);
9837                 reg += 4;
9838         }
9839 }
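
/* The loop above packs eight 4-bit indirection table entries into
 * each 32-bit register, first entry in the most significant nibble
 * (128 entries -> 16 registers).  A self-contained model of one
 * register's worth of packing:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t tbl[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };	/* entries < 16 */
	uint32_t val = tbl[0];
	int i;

	for (i = 1; i < 8; i++) {
		val <<= 4;	/* make room for the next nibble */
		val |= tbl[i];
	}
	printf("%08x\n", val);	/* prints 01230123 */
	return 0;
}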
9840
9841 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9842 {
9843         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9844                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9845         else
9846                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9847 }
9848
9849 /* tp->lock is held. */
9850 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9851 {
9852         u32 val, rdmac_mode;
9853         int i, err, limit;
9854         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9855
9856         tg3_disable_ints(tp);
9857
9858         tg3_stop_fw(tp);
9859
9860         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9861
9862         if (tg3_flag(tp, INIT_COMPLETE))
9863                 tg3_abort_hw(tp, 1);
9864
9865         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9866             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9867                 tg3_phy_pull_config(tp);
9868                 tg3_eee_pull_config(tp, NULL);
9869                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9870         }
9871
9872         /* Enable MAC control of LPI */
9873         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9874                 tg3_setup_eee(tp);
9875
9876         if (reset_phy)
9877                 tg3_phy_reset(tp);
9878
9879         err = tg3_chip_reset(tp);
9880         if (err)
9881                 return err;
9882
9883         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9884
9885         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9886                 val = tr32(TG3_CPMU_CTRL);
9887                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9888                 tw32(TG3_CPMU_CTRL, val);
9889
9890                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9891                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9892                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9893                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9894
9895                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9896                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9897                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9898                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9899
9900                 val = tr32(TG3_CPMU_HST_ACC);
9901                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9902                 val |= CPMU_HST_ACC_MACCLK_6_25;
9903                 tw32(TG3_CPMU_HST_ACC, val);
9904         }
9905
9906         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9907                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9908                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9909                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9910                 tw32(PCIE_PWR_MGMT_THRESH, val);
9911
9912                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9913                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9914
9915                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9916
9917                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9918                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9919         }
9920
9921         if (tg3_flag(tp, L1PLLPD_EN)) {
9922                 u32 grc_mode = tr32(GRC_MODE);
9923
9924                 /* Access the lower 1K of PL PCIE block registers. */
9925                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9926                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9927
9928                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9929                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9930                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9931
9932                 tw32(GRC_MODE, grc_mode);
9933         }
9934
9935         if (tg3_flag(tp, 57765_CLASS)) {
9936                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9937                         u32 grc_mode = tr32(GRC_MODE);
9938
9939                         /* Access the lower 1K of PL PCIE block registers. */
9940                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9941                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9942
9943                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9944                                    TG3_PCIE_PL_LO_PHYCTL5);
9945                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9946                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9947
9948                         tw32(GRC_MODE, grc_mode);
9949                 }
9950
9951                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9952                         u32 grc_mode;
9953
9954                         /* Fix transmit hangs */
9955                         val = tr32(TG3_CPMU_PADRNG_CTL);
9956                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9957                         tw32(TG3_CPMU_PADRNG_CTL, val);
9958
9959                         grc_mode = tr32(GRC_MODE);
9960
9961                         /* Access the lower 1K of DL PCIE block registers. */
9962                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9963                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9964
9965                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9966                                    TG3_PCIE_DL_LO_FTSMAX);
9967                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9968                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9969                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9970
9971                         tw32(GRC_MODE, grc_mode);
9972                 }
9973
9974                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9975                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9976                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9977                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9978         }
9979
9980         /* This works around an issue with Athlon chipsets on
9981          * B3 tigon3 silicon.  This bit has no effect on any
9982          * other revision.  But do not set this on PCI Express
9983          * chips and don't even touch the clocks if the CPMU is present.
9984          */
9985         if (!tg3_flag(tp, CPMU_PRESENT)) {
9986                 if (!tg3_flag(tp, PCI_EXPRESS))
9987                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9988                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9989         }
9990
9991         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9992             tg3_flag(tp, PCIX_MODE)) {
9993                 val = tr32(TG3PCI_PCISTATE);
9994                 val |= PCISTATE_RETRY_SAME_DMA;
9995                 tw32(TG3PCI_PCISTATE, val);
9996         }
9997
9998         if (tg3_flag(tp, ENABLE_APE)) {
9999                 /* Allow reads and writes to the
10000                  * APE register and memory space.
10001                  */
10002                 val = tr32(TG3PCI_PCISTATE);
10003                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10004                        PCISTATE_ALLOW_APE_SHMEM_WR |
10005                        PCISTATE_ALLOW_APE_PSPACE_WR;
10006                 tw32(TG3PCI_PCISTATE, val);
10007         }
10008
10009         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10010                 /* Enable some hw fixes.  */
10011                 val = tr32(TG3PCI_MSI_DATA);
10012                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10013                 tw32(TG3PCI_MSI_DATA, val);
10014         }
10015
10016         /* Descriptor ring init may make accesses to the
10017          * NIC SRAM area to set up the TX descriptors, so we
10018          * can only do this after the hardware has been
10019          * successfully reset.
10020          */
10021         err = tg3_init_rings(tp);
10022         if (err)
10023                 return err;
10024
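        /* 57765-plus parts take extra DMA read/write control tweaks layered
         * on top of the probe-time value kept in tp->dma_rwctrl.
         */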
10025         if (tg3_flag(tp, 57765_PLUS)) {
10026                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10027                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10028                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10029                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10030                 if (!tg3_flag(tp, 57765_CLASS) &&
10031                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10032                     tg3_asic_rev(tp) != ASIC_REV_5762)
10033                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10034                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10035         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10036                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10037                 /* This value is determined during the probe-time DMA
10038                  * engine test, tg3_test_dma.
10039                  */
10040                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10041         }
10042
10043         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10044                           GRC_MODE_4X_NIC_SEND_RINGS |
10045                           GRC_MODE_NO_TX_PHDR_CSUM |
10046                           GRC_MODE_NO_RX_PHDR_CSUM);
10047         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10048
10049         /* Pseudo-header checksum is done by hardware logic and not
10050          * the offload processors, so make the chip do the pseudo-
10051          * header checksums on receive.  For transmit it is more
10052          * convenient to do the pseudo-header checksum in software
10053          * as Linux does that on transmit for us in all cases.
10054          */
10055         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10056
10057         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10058         if (tp->rxptpctl)
10059                 tw32(TG3_RX_PTP_CTL,
10060                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10061
10062         if (tg3_flag(tp, PTP_CAPABLE))
10063                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10064
10065         tw32(GRC_MODE, tp->grc_mode | val);
10066
10067         /* On certain AMD platforms, the MRRS is restricted to 4000 because of
10068          * a south bridge limitation.  As a workaround, the driver sets the
10069          * MRRS to 2048 instead of the default 4096.
10070          */
10071         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10072             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10073                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10074                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10075         }
10076
10077         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
10078         val = tr32(GRC_MISC_CFG);
10079         val &= ~0xff;
10080         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10081         tw32(GRC_MISC_CFG, val);
10082
10083         /* Initialize MBUF/DESC pool. */
10084         if (tg3_flag(tp, 5750_PLUS)) {
10085                 /* Do nothing.  */
10086         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10087                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10088                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10089                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10090                 else
10091                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10092                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10093                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10094         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10095                 int fw_len;
10096
10097                 fw_len = tp->fw_len;
10098                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10099                 tw32(BUFMGR_MB_POOL_ADDR,
10100                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10101                 tw32(BUFMGR_MB_POOL_SIZE,
10102                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10103         }
10104
10105         if (tp->dev->mtu <= ETH_DATA_LEN) {
10106                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10107                      tp->bufmgr_config.mbuf_read_dma_low_water);
10108                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10109                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10110                 tw32(BUFMGR_MB_HIGH_WATER,
10111                      tp->bufmgr_config.mbuf_high_water);
10112         } else {
10113                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10114                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10115                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10116                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10117                 tw32(BUFMGR_MB_HIGH_WATER,
10118                      tp->bufmgr_config.mbuf_high_water_jumbo);
10119         }
10120         tw32(BUFMGR_DMA_LOW_WATER,
10121              tp->bufmgr_config.dma_low_water);
10122         tw32(BUFMGR_DMA_HIGH_WATER,
10123              tp->bufmgr_config.dma_high_water);
10124
10125         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10126         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10127                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10128         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10129             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10130             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10131             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10132                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10133         tw32(BUFMGR_MODE, val);
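        /* The buffer manager should come up almost immediately; poll for up
         * to 20 ms (2000 * 10 us) before declaring the device dead.
         */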
10134         for (i = 0; i < 2000; i++) {
10135                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10136                         break;
10137                 udelay(10);
10138         }
10139         if (i >= 2000) {
10140                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10141                 return -ENODEV;
10142         }
10143
10144         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10145                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10146
10147         tg3_setup_rxbd_thresholds(tp);
10148
10149         /* Initialize TG3_BDINFOs at:
10150          *  RCVDBDI_STD_BD:     standard eth size rx ring
10151          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10152          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10153          *
10154          * like so:
10155          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10156          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10157          *                              ring attribute flags
10158          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10159          *
10160          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10161          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10162          *
10163          * The size of each ring is fixed in the firmware, but the location is
10164          * configurable.
10165          */
10166         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10167              ((u64) tpr->rx_std_mapping >> 32));
10168         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10169              ((u64) tpr->rx_std_mapping & 0xffffffff));
10170         if (!tg3_flag(tp, 5717_PLUS))
10171                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10172                      NIC_SRAM_RX_BUFFER_DESC);
10173
10174         /* Disable the mini ring */
10175         if (!tg3_flag(tp, 5705_PLUS))
10176                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10177                      BDINFO_FLAGS_DISABLED);
10178
10179         /* Program the jumbo buffer descriptor ring control
10180          * blocks on those devices that have them.
10181          */
10182         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10183             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10184
10185                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10186                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10187                              ((u64) tpr->rx_jmb_mapping >> 32));
10188                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10189                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10190                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10191                               BDINFO_FLAGS_MAXLEN_SHIFT;
10192                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10193                              val | BDINFO_FLAGS_USE_EXT_RECV);
10194                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10195                             tg3_flag(tp, 57765_CLASS) ||
10196                             tg3_asic_rev(tp) == ASIC_REV_5762)
10197                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10198                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10199                 } else {
10200                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10201                              BDINFO_FLAGS_DISABLED);
10202                 }
10203
10204                 if (tg3_flag(tp, 57765_PLUS)) {
10205                         val = TG3_RX_STD_RING_SIZE(tp);
10206                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10207                         val |= (TG3_RX_STD_DMA_SZ << 2);
10208                 } else
10209                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10210         } else
10211                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10212
10213         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10214
10215         tpr->rx_std_prod_idx = tp->rx_pending;
10216         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10217
10218         tpr->rx_jmb_prod_idx =
10219                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10220         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10221
10222         tg3_rings_reset(tp);
10223
10224         /* Initialize MAC address and backoff seed. */
10225         __tg3_set_mac_addr(tp, false);
10226
10227         /* MTU + ethernet header + FCS + optional VLAN tag */
10228         tw32(MAC_RX_MTU_SIZE,
10229              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10230
10231         /* The slot time is changed by tg3_setup_phy if we
10232          * run at gigabit with half duplex.
10233          */
10234         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10235               (6 << TX_LENGTHS_IPG_SHIFT) |
10236               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10237
10238         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10239             tg3_asic_rev(tp) == ASIC_REV_5762)
10240                 val |= tr32(MAC_TX_LENGTHS) &
10241                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10242                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10243
10244         tw32(MAC_TX_LENGTHS, val);
10245
10246         /* Receive rules. */
10247         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10248         tw32(RCVLPC_CONFIG, 0x0181);
10249
10250         /* Calculate RDMAC_MODE setting early, we need it to determine
10251          * the RCVLPC_STATE_ENABLE mask.
10252          */
10253         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10254                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10255                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10256                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10257                       RDMAC_MODE_LNGREAD_ENAB);
10258
10259         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10260                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10261
10262         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10263             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10264             tg3_asic_rev(tp) == ASIC_REV_57780)
10265                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10266                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10267                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10268
10269         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10270             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10271                 if (tg3_flag(tp, TSO_CAPABLE) &&
10272                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10273                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10274                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10275                            !tg3_flag(tp, IS_5788)) {
10276                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10277                 }
10278         }
10279
10280         if (tg3_flag(tp, PCI_EXPRESS))
10281                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10282
10283         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10284                 tp->dma_limit = 0;
10285                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10286                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10287                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10288                 }
10289         }
10290
10291         if (tg3_flag(tp, HW_TSO_1) ||
10292             tg3_flag(tp, HW_TSO_2) ||
10293             tg3_flag(tp, HW_TSO_3))
10294                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10295
10296         if (tg3_flag(tp, 57765_PLUS) ||
10297             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10298             tg3_asic_rev(tp) == ASIC_REV_57780)
10299                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10300
10301         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10302             tg3_asic_rev(tp) == ASIC_REV_5762)
10303                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10304
10305         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10306             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10307             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10308             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10309             tg3_flag(tp, 57765_PLUS)) {
10310                 u32 tgtreg;
10311
10312                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10313                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10314                 else
10315                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10316
10317                 val = tr32(tgtreg);
10318                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10319                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10320                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10321                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10322                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10323                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10324                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10325                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10326                 }
10327                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10328         }
10329
10330         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10331             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10332             tg3_asic_rev(tp) == ASIC_REV_5762) {
10333                 u32 tgtreg;
10334
10335                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10336                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10337                 else
10338                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10339
10340                 val = tr32(tgtreg);
10341                 tw32(tgtreg, val |
10342                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10343                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10344         }
10345
10346         /* Receive/send statistics. */
10347         if (tg3_flag(tp, 5750_PLUS)) {
10348                 val = tr32(RCVLPC_STATS_ENABLE);
10349                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10350                 tw32(RCVLPC_STATS_ENABLE, val);
10351         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10352                    tg3_flag(tp, TSO_CAPABLE)) {
10353                 val = tr32(RCVLPC_STATS_ENABLE);
10354                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10355                 tw32(RCVLPC_STATS_ENABLE, val);
10356         } else {
10357                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10358         }
10359         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10360         tw32(SNDDATAI_STATSENAB, 0xffffff);
10361         tw32(SNDDATAI_STATSCTRL,
10362              (SNDDATAI_SCTRL_ENABLE |
10363               SNDDATAI_SCTRL_FASTUPD));
10364
10365         /* Setup host coalescing engine. */
10366         tw32(HOSTCC_MODE, 0);
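        /* Wait up to 20 ms for the coalescing engine to quiesce before
         * reprogramming it.
         */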
10367         for (i = 0; i < 2000; i++) {
10368                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10369                         break;
10370                 udelay(10);
10371         }
10372
10373         __tg3_set_coalesce(tp, &tp->coal);
10374
10375         if (!tg3_flag(tp, 5705_PLUS)) {
10376                 /* Status/statistics block address.  See tg3_timer,
10377                  * the tg3_periodic_fetch_stats call there, and
10378                  * tg3_get_stats to see how this works for 5705/5750 chips.
10379                  */
10380                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10381                      ((u64) tp->stats_mapping >> 32));
10382                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10383                      ((u64) tp->stats_mapping & 0xffffffff));
10384                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10385
10386                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10387
10388                 /* Clear statistics and status block memory areas */
10389                 for (i = NIC_SRAM_STATS_BLK;
10390                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10391                      i += sizeof(u32)) {
10392                         tg3_write_mem(tp, i, 0);
10393                         udelay(40);
10394                 }
10395         }
10396
10397         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10398
10399         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10400         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10401         if (!tg3_flag(tp, 5705_PLUS))
10402                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10403
10404         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10405                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10406                 /* Reset to prevent intermittently losing the first RX packet. */
10407                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10408                 udelay(10);
10409         }
10410
10411         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10412                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10413                         MAC_MODE_FHDE_ENABLE;
10414         if (tg3_flag(tp, ENABLE_APE))
10415                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10416         if (!tg3_flag(tp, 5705_PLUS) &&
10417             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10418             tg3_asic_rev(tp) != ASIC_REV_5700)
10419                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10420         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10421         udelay(40);
10422
10423         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10424          * If TG3_FLAG_IS_NIC is zero, we should read the
10425          * register to preserve the GPIO settings for LOMs. The GPIOs,
10426          * whether used as inputs or outputs, are set by boot code after
10427          * reset.
10428          */
10429         if (!tg3_flag(tp, IS_NIC)) {
10430                 u32 gpio_mask;
10431
10432                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10433                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10434                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10435
10436                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10437                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10438                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10439
10440                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10441                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10442
10443                 tp->grc_local_ctrl &= ~gpio_mask;
10444                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10445
10446                 /* GPIO1 must be driven high for eeprom write protect */
10447                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10448                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10449                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10450         }
10451         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10452         udelay(100);
10453
10454         if (tg3_flag(tp, USING_MSIX)) {
10455                 val = tr32(MSGINT_MODE);
10456                 val |= MSGINT_MODE_ENABLE;
10457                 if (tp->irq_cnt > 1)
10458                         val |= MSGINT_MODE_MULTIVEC_EN;
10459                 if (!tg3_flag(tp, 1SHOT_MSI))
10460                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10461                 tw32(MSGINT_MODE, val);
10462         }
10463
10464         if (!tg3_flag(tp, 5705_PLUS)) {
10465                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10466                 udelay(40);
10467         }
10468
10469         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10470                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10471                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10472                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10473                WDMAC_MODE_LNGREAD_ENAB);
10474
10475         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10476             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10477                 if (tg3_flag(tp, TSO_CAPABLE) &&
10478                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10479                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10480                         /* nothing */
10481                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10482                            !tg3_flag(tp, IS_5788)) {
10483                         val |= WDMAC_MODE_RX_ACCEL;
10484                 }
10485         }
10486
10487         /* Enable host coalescing bug fix */
10488         if (tg3_flag(tp, 5755_PLUS))
10489                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10490
10491         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10492                 val |= WDMAC_MODE_BURST_ALL_DATA;
10493
10494         tw32_f(WDMAC_MODE, val);
10495         udelay(40);
10496
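        /* Cap the PCI-X maximum memory read byte count at 2K on 5703/5704;
         * clearing PCI_X_CMD_MAX_SPLIT on the 5704 also limits it to a
         * single outstanding split transaction.
         */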
10497         if (tg3_flag(tp, PCIX_MODE)) {
10498                 u16 pcix_cmd;
10499
10500                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10501                                      &pcix_cmd);
10502                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10503                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10504                         pcix_cmd |= PCI_X_CMD_READ_2K;
10505                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10506                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10507                         pcix_cmd |= PCI_X_CMD_READ_2K;
10508                 }
10509                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10510                                       pcix_cmd);
10511         }
10512
10513         tw32_f(RDMAC_MODE, rdmac_mode);
10514         udelay(40);
10515
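        /* 5719/5720: if any read DMA channel length register exceeds the
         * MTU, apply the LSO read DMA workaround bit.  The flag set here is
         * cleared again in tg3_periodic_fetch_stats() once enough frames
         * have been transmitted.
         */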
10516         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10517             tg3_asic_rev(tp) == ASIC_REV_5720) {
10518                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10519                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10520                                 break;
10521                 }
10522                 if (i < TG3_NUM_RDMA_CHANNELS) {
10523                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10524                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10525                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10526                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10527                 }
10528         }
10529
10530         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10531         if (!tg3_flag(tp, 5705_PLUS))
10532                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10533
10534         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10535                 tw32(SNDDATAC_MODE,
10536                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10537         else
10538                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10539
10540         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10541         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10542         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10543         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10544                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10545         tw32(RCVDBDI_MODE, val);
10546         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10547         if (tg3_flag(tp, HW_TSO_1) ||
10548             tg3_flag(tp, HW_TSO_2) ||
10549             tg3_flag(tp, HW_TSO_3))
10550                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10551         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10552         if (tg3_flag(tp, ENABLE_TSS))
10553                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10554         tw32(SNDBDI_MODE, val);
10555         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10556
10557         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10558                 err = tg3_load_5701_a0_firmware_fix(tp);
10559                 if (err)
10560                         return err;
10561         }
10562
10563         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10564                 /* Ignore any errors for the firmware download. If download
10565                  * fails, the device will operate with EEE disabled.
10566                  */
10567                 tg3_load_57766_firmware(tp);
10568         }
10569
10570         if (tg3_flag(tp, TSO_CAPABLE)) {
10571                 err = tg3_load_tso_firmware(tp);
10572                 if (err)
10573                         return err;
10574         }
10575
10576         tp->tx_mode = TX_MODE_ENABLE;
10577
10578         if (tg3_flag(tp, 5755_PLUS) ||
10579             tg3_asic_rev(tp) == ASIC_REV_5906)
10580                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10581
10582         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10583             tg3_asic_rev(tp) == ASIC_REV_5762) {
10584                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10585                 tp->tx_mode &= ~val;
10586                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10587         }
10588
10589         tw32_f(MAC_TX_MODE, tp->tx_mode);
10590         udelay(100);
10591
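        /* Program the RSS indirection table and a 40-byte hash key taken
         * from the kernel's global RSS key (netdev_rss_key_fill).
         */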
10592         if (tg3_flag(tp, ENABLE_RSS)) {
10593                 u32 rss_key[10];
10594
10595                 tg3_rss_write_indir_tbl(tp);
10596
10597                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10598
10599                 for (i = 0; i < 10; i++)
10600                         tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10601         }
10602
10603         tp->rx_mode = RX_MODE_ENABLE;
10604         if (tg3_flag(tp, 5755_PLUS))
10605                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10606
10607         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10608                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10609
10610         if (tg3_flag(tp, ENABLE_RSS))
10611                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10612                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10613                                RX_MODE_RSS_IPV6_HASH_EN |
10614                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10615                                RX_MODE_RSS_IPV4_HASH_EN |
10616                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10617
10618         tw32_f(MAC_RX_MODE, tp->rx_mode);
10619         udelay(10);
10620
10621         tw32(MAC_LED_CTRL, tp->led_ctrl);
10622
10623         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10624         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10625                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10626                 udelay(10);
10627         }
10628         tw32_f(MAC_RX_MODE, tp->rx_mode);
10629         udelay(10);
10630
10631         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10632                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10633                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10634                         /* Set drive transmission level to 1.2V  */
10635                         /* only if the signal pre-emphasis bit is not set  */
10636                         val = tr32(MAC_SERDES_CFG);
10637                         val &= 0xfffff000;
10638                         val |= 0x880;
10639                         tw32(MAC_SERDES_CFG, val);
10640                 }
10641                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10642                         tw32(MAC_SERDES_CFG, 0x616000);
10643         }
10644
10645         /* Prevent chip from dropping frames when flow control
10646          * is enabled.
10647          */
10648         if (tg3_flag(tp, 57765_CLASS))
10649                 val = 1;
10650         else
10651                 val = 2;
10652         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10653
10654         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10655             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10656                 /* Use hardware link auto-negotiation */
10657                 tg3_flag_set(tp, HW_AUTONEG);
10658         }
10659
10660         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10661             tg3_asic_rev(tp) == ASIC_REV_5714) {
10662                 u32 tmp;
10663
10664                 tmp = tr32(SERDES_RX_CTRL);
10665                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10666                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10667                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10668                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10669         }
10670
10671         if (!tg3_flag(tp, USE_PHYLIB)) {
10672                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10673                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10674
10675                 err = tg3_setup_phy(tp, false);
10676                 if (err)
10677                         return err;
10678
10679                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10680                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10681                         u32 tmp;
10682
10683                         /* Clear CRC stats. */
10684                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10685                                 tg3_writephy(tp, MII_TG3_TEST1,
10686                                              tmp | MII_TG3_TEST1_CRC_EN);
10687                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10688                         }
10689                 }
10690         }
10691
10692         __tg3_set_rx_mode(tp->dev);
10693
10694         /* Initialize receive rules. */
10695         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10696         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10697         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10698         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10699
10700         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10701                 limit = 8;
10702         else
10703                 limit = 16;
10704         if (tg3_flag(tp, ENABLE_ASF))
10705                 limit -= 4;
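        /* Zero the unused rules from (limit - 1) down to rule 4; rules 2
         * and 3 are deliberately left alone, and with ASF enabled the top
         * four rules are left for the firmware.  Each case below falls
         * through to the next on purpose.
         */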
10706         switch (limit) {
10707         case 16:
10708                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10709         case 15:
10710                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10711         case 14:
10712                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10713         case 13:
10714                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10715         case 12:
10716                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10717         case 11:
10718                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10719         case 10:
10720                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10721         case 9:
10722                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10723         case 8:
10724                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10725         case 7:
10726                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10727         case 6:
10728                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10729         case 5:
10730                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10731         case 4:
10732                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10733         case 3:
10734                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10735         case 2:
10736         case 1:
10738         default:
10739                 break;
10740         }
10741
10742         if (tg3_flag(tp, ENABLE_APE))
10743                 /* Write our heartbeat update interval to APE. */
10744                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10745                                 APE_HOST_HEARTBEAT_INT_5SEC);
10746
10747         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10748
10749         return 0;
10750 }
10751
10752 /* Called at device open time to get the chip ready for
10753  * packet processing.  Invoked with tp->lock held.
10754  */
10755 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10756 {
10757         /* Chip may have been just powered on. If so, the boot code may still
10758          * be running initialization. Wait for it to finish to avoid races in
10759          * accessing the hardware.
10760          */
10761         tg3_enable_register_access(tp);
10762         tg3_poll_fw(tp);
10763
10764         tg3_switch_clocks(tp);
10765
10766         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10767
10768         return tg3_reset_hw(tp, reset_phy);
10769 }
10770
10771 #ifdef CONFIG_TIGON3_HWMON
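/* Read the OCIR sensor records out of the APE scratchpad.  Records with a
 * bad signature or without the ACTIVE flag are zeroed so that callers can
 * simply skip them.
 */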
10772 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10773 {
10774         int i;
10775
10776         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10777                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10778
10779                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10780                 off += len;
10781
10782                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10783                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10784                         memset(ocir, 0, TG3_OCIR_LEN);
10785         }
10786 }
10787
10788 /* sysfs attributes for hwmon */
10789 static ssize_t tg3_show_temp(struct device *dev,
10790                              struct device_attribute *devattr, char *buf)
10791 {
10792         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10793         struct tg3 *tp = dev_get_drvdata(dev);
10794         u32 temperature;
10795
10796         spin_lock_bh(&tp->lock);
10797         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10798                                 sizeof(temperature));
10799         spin_unlock_bh(&tp->lock);
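        /* The APE reports whole degrees Celsius; hwmon sysfs expects
         * millidegrees, hence the scaling below.
         */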
10800         return sprintf(buf, "%u\n", temperature * 1000);
10801 }
10803
10804 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10805                           TG3_TEMP_SENSOR_OFFSET);
10806 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10807                           TG3_TEMP_CAUTION_OFFSET);
10808 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10809                           TG3_TEMP_MAX_OFFSET);
10810
10811 static struct attribute *tg3_attrs[] = {
10812         &sensor_dev_attr_temp1_input.dev_attr.attr,
10813         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10814         &sensor_dev_attr_temp1_max.dev_attr.attr,
10815         NULL
10816 };
10817 ATTRIBUTE_GROUPS(tg3);
10818
10819 static void tg3_hwmon_close(struct tg3 *tp)
10820 {
10821         if (tp->hwmon_dev) {
10822                 hwmon_device_unregister(tp->hwmon_dev);
10823                 tp->hwmon_dev = NULL;
10824         }
10825 }
10826
10827 static void tg3_hwmon_open(struct tg3 *tp)
10828 {
10829         int i;
10830         u32 size = 0;
10831         struct pci_dev *pdev = tp->pdev;
10832         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10833
10834         tg3_sd_scan_scratchpad(tp, ocirs);
10835
10836         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10837                 if (!ocirs[i].src_data_length)
10838                         continue;
10839
10840                 size += ocirs[i].src_hdr_length;
10841                 size += ocirs[i].src_data_length;
10842         }
10843
10844         if (!size)
10845                 return;
10846
10847         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10848                                                           tp, tg3_groups);
10849         if (IS_ERR(tp->hwmon_dev)) {
10850                 tp->hwmon_dev = NULL;
10851                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10852         }
10853 }
10854 #else
10855 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10856 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10857 #endif /* CONFIG_TIGON3_HWMON */
10859
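/* Accumulate a 32-bit hardware counter into a 64-bit software counter kept
 * as high/low halves.  Unsigned wraparound of ->low is detected by
 * (->low < __val) after the add and carried into ->high.
 */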
10860 #define TG3_STAT_ADD32(PSTAT, REG) \
10861 do {    u32 __val = tr32(REG); \
10862         (PSTAT)->low += __val; \
10863         if ((PSTAT)->low < __val) \
10864                 (PSTAT)->high += 1; \
10865 } while (0)
10866
10867 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10868 {
10869         struct tg3_hw_stats *sp = tp->hw_stats;
10870
10871         if (!tp->link_up)
10872                 return;
10873
10874         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10875         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10876         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10877         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10878         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10879         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10880         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10881         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10882         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10883         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10884         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10885         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10886         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10887         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10888                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10889                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10890                 u32 val;
10891
10892                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10893                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10894                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10895                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10896         }
10897
10898         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10899         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10900         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10901         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10902         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10903         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10904         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10905         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10906         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10907         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10908         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10909         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10910         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10911         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10912
10913         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10914         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10915             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10916             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10917             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10918                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10919         } else {
10920                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10921                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10922                 if (val) {
10923                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10924                         sp->rx_discards.low += val;
10925                         if (sp->rx_discards.low < val)
10926                                 sp->rx_discards.high += 1;
10927                 }
10928                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10929         }
10930         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10931 }
10932
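/* Work around lost MSIs: if a vector still has work pending but its
 * consumer indices have not advanced since the last check, invoke the
 * handler by hand as if the MSI had fired.
 */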
10933 static void tg3_chk_missed_msi(struct tg3 *tp)
10934 {
10935         u32 i;
10936
10937         for (i = 0; i < tp->irq_cnt; i++) {
10938                 struct tg3_napi *tnapi = &tp->napi[i];
10939
10940                 if (tg3_has_work(tnapi)) {
10941                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10942                             tnapi->last_tx_cons == tnapi->tx_cons) {
10943                                 if (tnapi->chk_msi_cnt < 1) {
10944                                         tnapi->chk_msi_cnt++;
10945                                         return;
10946                                 }
10947                                 tg3_msi(0, tnapi);
10948                         }
10949                 }
10950                 tnapi->chk_msi_cnt = 0;
10951                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10952                 tnapi->last_tx_cons = tnapi->tx_cons;
10953         }
10954 }
10955
10956 static void tg3_timer(struct timer_list *t)
10957 {
10958         struct tg3 *tp = from_timer(tp, t, timer);
10959
10960         spin_lock(&tp->lock);
10961
10962         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10963                 spin_unlock(&tp->lock);
10964                 goto restart_timer;
10965         }
10966
10967         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10968             tg3_flag(tp, 57765_CLASS))
10969                 tg3_chk_missed_msi(tp);
10970
10971         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10972                 /* BCM4785: Flush posted writes from GbE to host memory. */
10973                 tr32(HOSTCC_MODE);
10974         }
10975
10976         if (!tg3_flag(tp, TAGGED_STATUS)) {
10977                 /* All of this garbage is because, when using non-tagged
10978                  * IRQ status, the mailbox/status_block protocol the chip
10979                  * uses with the CPU is race prone.
10980                  */
10981                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10982                         tw32(GRC_LOCAL_CTRL,
10983                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10984                 } else {
10985                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10986                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10987                 }
10988
10989                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10990                         spin_unlock(&tp->lock);
10991                         tg3_reset_task_schedule(tp);
10992                         goto restart_timer;
10993                 }
10994         }
10995
10996         /* This part only runs once per second. */
10997         if (!--tp->timer_counter) {
10998                 if (tg3_flag(tp, 5705_PLUS))
10999                         tg3_periodic_fetch_stats(tp);
11000
11001                 if (tp->setlpicnt && !--tp->setlpicnt)
11002                         tg3_phy_eee_enable(tp);
11003
11004                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11005                         u32 mac_stat;
11006                         int phy_event;
11007
11008                         mac_stat = tr32(MAC_STATUS);
11009
11010                         phy_event = 0;
11011                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11012                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11013                                         phy_event = 1;
11014                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11015                                 phy_event = 1;
11016
11017                         if (phy_event)
11018                                 tg3_setup_phy(tp, false);
11019                 } else if (tg3_flag(tp, POLL_SERDES)) {
11020                         u32 mac_stat = tr32(MAC_STATUS);
11021                         int need_setup = 0;
11022
11023                         if (tp->link_up &&
11024                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11025                                 need_setup = 1;
11026                         }
11027                         if (!tp->link_up &&
11028                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11029                                          MAC_STATUS_SIGNAL_DET))) {
11030                                 need_setup = 1;
11031                         }
11032                         if (need_setup) {
11033                                 if (!tp->serdes_counter) {
11034                                         tw32_f(MAC_MODE,
11035                                              (tp->mac_mode &
11036                                               ~MAC_MODE_PORT_MODE_MASK));
11037                                         udelay(40);
11038                                         tw32_f(MAC_MODE, tp->mac_mode);
11039                                         udelay(40);
11040                                 }
11041                                 tg3_setup_phy(tp, false);
11042                         }
11043                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11044                            tg3_flag(tp, 5780_CLASS)) {
11045                         tg3_serdes_parallel_detect(tp);
11046                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11047                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11048                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11049                                          TG3_CPMU_STATUS_LINK_MASK);
11050
11051                         if (link_up != tp->link_up)
11052                                 tg3_setup_phy(tp, false);
11053                 }
11054
11055                 tp->timer_counter = tp->timer_multiplier;
11056         }
11057
11058         /* Heartbeat is only sent once every 2 seconds.
11059          *
11060          * The heartbeat is to tell the ASF firmware that the host
11061          * driver is still alive.  In the event that the OS crashes,
11062          * ASF needs to reset the hardware to free up the FIFO space
11063          * that may be filled with rx packets destined for the host.
11064          * If the FIFO is full, ASF will no longer function properly.
11065          *
11066          * Unintended resets have been reported on real-time kernels,
11067          * where the timer doesn't run on time.  Netpoll will also have
11068          * the same problem.
11069          *
11070          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11071          * to check the ring condition when the heartbeat is expiring
11072          * before doing the reset.  This will prevent most unintended
11073          * resets.
11074          */
11075         if (!--tp->asf_counter) {
11076                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11077                         tg3_wait_for_event_ack(tp);
11078
11079                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11080                                       FWCMD_NICDRV_ALIVE3);
11081                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11082                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11083                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11084
11085                         tg3_generate_fw_event(tp);
11086                 }
11087                 tp->asf_counter = tp->asf_multiplier;
11088         }
11089
11090         /* Update the APE heartbeat every 5 seconds. */
11091         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11092
11093         spin_unlock(&tp->lock);
11094
11095 restart_timer:
11096         tp->timer.expires = jiffies + tp->timer_offset;
11097         add_timer(&tp->timer);
11098 }
11099
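/* Chips with reliable tagged status (other than the 5717 and 57765 class,
 * which need the missed-MSI check above) only require the once-per-second
 * maintenance work, so their timer runs at 1 Hz instead of 10 Hz.
 */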
11100 static void tg3_timer_init(struct tg3 *tp)
11101 {
11102         if (tg3_flag(tp, TAGGED_STATUS) &&
11103             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11104             !tg3_flag(tp, 57765_CLASS))
11105                 tp->timer_offset = HZ;
11106         else
11107                 tp->timer_offset = HZ / 10;
11108
11109         BUG_ON(tp->timer_offset > HZ);
11110
11111         tp->timer_multiplier = (HZ / tp->timer_offset);
11112         tp->asf_multiplier = (HZ / tp->timer_offset) *
11113                              TG3_FW_UPDATE_FREQ_SEC;
11114
11115         timer_setup(&tp->timer, tg3_timer, 0);
11116 }
11117
11118 static void tg3_timer_start(struct tg3 *tp)
11119 {
11120         tp->asf_counter   = tp->asf_multiplier;
11121         tp->timer_counter = tp->timer_multiplier;
11122
11123         tp->timer.expires = jiffies + tp->timer_offset;
11124         add_timer(&tp->timer);
11125 }
11126
11127 static void tg3_timer_stop(struct tg3 *tp)
11128 {
11129         del_timer_sync(&tp->timer);
11130 }
11131
11132 /* Restart hardware after configuration changes, self-test, etc.
11133  * Invoked with tp->lock held.
11134  */
11135 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11136         __releases(tp->lock)
11137         __acquires(tp->lock)
11138 {
11139         int err;
11140
11141         err = tg3_init_hw(tp, reset_phy);
11142         if (err) {
11143                 netdev_err(tp->dev,
11144                            "Failed to re-initialize device, aborting\n");
11145                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11146                 tg3_full_unlock(tp);
11147                 tg3_timer_stop(tp);
11148                 tp->irq_sync = 0;
11149                 tg3_napi_enable(tp);
11150                 dev_close(tp->dev);
11151                 tg3_full_lock(tp, 0);
11152         }
11153         return err;
11154 }
11155
11156 static void tg3_reset_task(struct work_struct *work)
11157 {
11158         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11159         int err;
11160
11161         rtnl_lock();
11162         tg3_full_lock(tp, 0);
11163
11164         if (!netif_running(tp->dev)) {
11165                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11166                 tg3_full_unlock(tp);
11167                 rtnl_unlock();
11168                 return;
11169         }
11170
11171         tg3_full_unlock(tp);
11172
11173         tg3_phy_stop(tp);
11174
11175         tg3_netif_stop(tp);
11176
11177         tg3_full_lock(tp, 1);
11178
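        /* A TX hang suggests our posted mailbox writes may be getting
         * reordered by the host bridge; fall back to mailbox writes that
         * flush by reading the register back.
         */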
11179         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11180                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11181                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11182                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11183                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11184         }
11185
11186         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11187         err = tg3_init_hw(tp, true);
11188         if (err)
11189                 goto out;
11190
11191         tg3_netif_start(tp);
11192
11193 out:
11194         tg3_full_unlock(tp);
11195
11196         if (!err)
11197                 tg3_phy_start(tp);
11198
11199         tg3_flag_clear(tp, RESET_TASK_PENDING);
11200         rtnl_unlock();
11201 }
11202
11203 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11204 {
11205         irq_handler_t fn;
11206         unsigned long flags;
11207         char *name;
11208         struct tg3_napi *tnapi = &tp->napi[irq_num];
11209
11210         if (tp->irq_cnt == 1)
11211                 name = tp->dev->name;
11212         else {
11213                 name = &tnapi->irq_lbl[0];
11214                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11215                         snprintf(name, IFNAMSIZ,
11216                                  "%s-txrx-%d", tp->dev->name, irq_num);
11217                 else if (tnapi->tx_buffers)
11218                         snprintf(name, IFNAMSIZ,
11219                                  "%s-tx-%d", tp->dev->name, irq_num);
11220                 else if (tnapi->rx_rcb)
11221                         snprintf(name, IFNAMSIZ,
11222                                  "%s-rx-%d", tp->dev->name, irq_num);
11223                 else
11224                         snprintf(name, IFNAMSIZ,
11225                                  "%s-%d", tp->dev->name, irq_num);
11226                 name[IFNAMSIZ - 1] = 0;
11227         }
11228
11229         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11230                 fn = tg3_msi;
11231                 if (tg3_flag(tp, 1SHOT_MSI))
11232                         fn = tg3_msi_1shot;
11233                 flags = 0;
11234         } else {
11235                 fn = tg3_interrupt;
11236                 if (tg3_flag(tp, TAGGED_STATUS))
11237                         fn = tg3_interrupt_tagged;
11238                 flags = IRQF_SHARED;
11239         }
11240
11241         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11242 }
11243
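      /* Verify that the device can actually deliver an interrupt: install
       * tg3_test_isr, force an interrupt through the host coalescing engine,
       * and poll the interrupt mailbox for up to 5 x 10 ms before restoring
       * the normal handler.
       */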
11244 static int tg3_test_interrupt(struct tg3 *tp)
11245 {
11246         struct tg3_napi *tnapi = &tp->napi[0];
11247         struct net_device *dev = tp->dev;
11248         int err, i, intr_ok = 0;
11249         u32 val;
11250
11251         if (!netif_running(dev))
11252                 return -ENODEV;
11253
11254         tg3_disable_ints(tp);
11255
11256         free_irq(tnapi->irq_vec, tnapi);
11257
11258         /*
11259          * Turn off MSI one-shot mode.  Otherwise this test has no
11260          * way to observe whether the interrupt was delivered.
11261          */
11262         if (tg3_flag(tp, 57765_PLUS)) {
11263                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11264                 tw32(MSGINT_MODE, val);
11265         }
11266
11267         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11268                           IRQF_SHARED, dev->name, tnapi);
11269         if (err)
11270                 return err;
11271
11272         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11273         tg3_enable_ints(tp);
11274
11275         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11276                tnapi->coal_now);
11277
11278         for (i = 0; i < 5; i++) {
11279                 u32 int_mbox, misc_host_ctrl;
11280
11281                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11282                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11283
11284                 if ((int_mbox != 0) ||
11285                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11286                         intr_ok = 1;
11287                         break;
11288                 }
11289
11290                 if (tg3_flag(tp, 57765_PLUS) &&
11291                     tnapi->hw_status->status_tag != tnapi->last_tag)
11292                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11293
11294                 msleep(10);
11295         }
11296
11297         tg3_disable_ints(tp);
11298
11299         free_irq(tnapi->irq_vec, tnapi);
11300
11301         err = tg3_request_irq(tp, 0);
11302
11303         if (err)
11304                 return err;
11305
11306         if (intr_ok) {
11307                 /* Re-enable MSI one-shot mode. */
11308                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11309                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11310                         tw32(MSGINT_MODE, val);
11311                 }
11312                 return 0;
11313         }
11314
11315         return -EIO;
11316 }
11317
11318 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11319  * INTx mode is successfully restored.
11320  */
11321 static int tg3_test_msi(struct tg3 *tp)
11322 {
11323         int err;
11324         u16 pci_cmd;
11325
11326         if (!tg3_flag(tp, USING_MSI))
11327                 return 0;
11328
11329         /* Turn off SERR reporting in case MSI terminates with Master
11330          * Abort.
11331          */
11332         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11333         pci_write_config_word(tp->pdev, PCI_COMMAND,
11334                               pci_cmd & ~PCI_COMMAND_SERR);
11335
11336         err = tg3_test_interrupt(tp);
11337
11338         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11339
11340         if (!err)
11341                 return 0;
11342
11343         /* other failures */
11344         if (err != -EIO)
11345                 return err;
11346
11347         /* MSI test failed, go back to INTx mode */
11348         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11349                     "to INTx mode. Please report this failure to the PCI "
11350                     "maintainer and include system chipset information\n");
11351
11352         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11353
11354         pci_disable_msi(tp->pdev);
11355
11356         tg3_flag_clear(tp, USING_MSI);
11357         tp->napi[0].irq_vec = tp->pdev->irq;
11358
11359         err = tg3_request_irq(tp, 0);
11360         if (err)
11361                 return err;
11362
11363         /* Need to reset the chip because the MSI cycle may have terminated
11364          * with Master Abort.
11365          */
11366         tg3_full_lock(tp, 1);
11367
11368         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11369         err = tg3_init_hw(tp, true);
11370
11371         tg3_full_unlock(tp);
11372
11373         if (err)
11374                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11375
11376         return err;
11377 }
11378
11379 static int tg3_request_firmware(struct tg3 *tp)
11380 {
11381         const struct tg3_firmware_hdr *fw_hdr;
11382
11383         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11384                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11385                            tp->fw_needed);
11386                 return -ENOENT;
11387         }
11388
11389         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11390
11391         /* Firmware blob starts with version numbers, followed by
11392          * start address and _full_ length including BSS sections
11393          * (which must be longer than the actual data, of course).
11394          */
11395
11396         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11397         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11398                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11399                            tp->fw_len, tp->fw_needed);
11400                 release_firmware(tp->fw);
11401                 tp->fw = NULL;
11402                 return -EINVAL;
11403         }
11404
11405         /* We no longer need firmware; we have it. */
11406         tp->fw_needed = NULL;
11407         return 0;
11408 }
11409
11410 static u32 tg3_irq_count(struct tg3 *tp)
11411 {
11412         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11413
11414         if (irq_cnt > 1) {
11415                 /* We want as many rx rings enabled as there are cpus.
11416                  * In multiqueue MSI-X mode, the first MSI-X vector
11417                  * only deals with link interrupts, etc, so we add
11418                  * one to the number of vectors we are requesting.
11419                  */
11420                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11421         }
11422
11423         return irq_cnt;
11424 }
11425
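      /* Try to switch the device to MSI-X.  Returns true on success; on a
       * partial vector grant the RX/TX queue counts are trimmed to fit the
       * vectors actually allocated.
       */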
11426 static bool tg3_enable_msix(struct tg3 *tp)
11427 {
11428         int i, rc;
11429         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11430
11431         tp->txq_cnt = tp->txq_req;
11432         tp->rxq_cnt = tp->rxq_req;
11433         if (!tp->rxq_cnt)
11434                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11435         if (tp->rxq_cnt > tp->rxq_max)
11436                 tp->rxq_cnt = tp->rxq_max;
11437
11438         /* Disable multiple TX rings by default.  Simple round-robin hardware
11439          * scheduling of the TX rings can cause starvation of rings with
11440          * small packets when other rings have TSO or jumbo packets.
11441          */
11442         if (!tp->txq_req)
11443                 tp->txq_cnt = 1;
11444
11445         tp->irq_cnt = tg3_irq_count(tp);
11446
11447         for (i = 0; i < tp->irq_max; i++) {
11448                 msix_ent[i].entry  = i;
11449                 msix_ent[i].vector = 0;
11450         }
11451
11452         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11453         if (rc < 0) {
11454                 return false;
11455         } else if (rc < tp->irq_cnt) {
11456                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11457                               tp->irq_cnt, rc);
11458                 tp->irq_cnt = rc;
11459                 tp->rxq_cnt = max(rc - 1, 1);
11460                 if (tp->txq_cnt)
11461                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11462         }
11463
11464         for (i = 0; i < tp->irq_max; i++)
11465                 tp->napi[i].irq_vec = msix_ent[i].vector;
11466
11467         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11468                 pci_disable_msix(tp->pdev);
11469                 return false;
11470         }
11471
11472         if (tp->irq_cnt == 1)
11473                 return true;
11474
11475         tg3_flag_set(tp, ENABLE_RSS);
11476
11477         if (tp->txq_cnt > 1)
11478                 tg3_flag_set(tp, ENABLE_TSS);
11479
11480         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11481
11482         return true;
11483 }
11484
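      /* Choose the interrupt mode: MSI-X (multiqueue) when possible, then
       * MSI, then legacy INTx, and size the TX/RX queue counts to match.
       */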
11485 static void tg3_ints_init(struct tg3 *tp)
11486 {
11487         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11488             !tg3_flag(tp, TAGGED_STATUS)) {
11489                 /* All MSI-supporting chips should support tagged
11490                  * status.  Assert that this is the case.
11491                  */
11492                 netdev_warn(tp->dev,
11493                             "MSI without TAGGED_STATUS? Not using MSI\n");
11494                 goto defcfg;
11495         }
11496
11497         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11498                 tg3_flag_set(tp, USING_MSIX);
11499         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11500                 tg3_flag_set(tp, USING_MSI);
11501
11502         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11503                 u32 msi_mode = tr32(MSGINT_MODE);
11504                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11505                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11506                 if (!tg3_flag(tp, 1SHOT_MSI))
11507                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11508                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11509         }
11510 defcfg:
11511         if (!tg3_flag(tp, USING_MSIX)) {
11512                 tp->irq_cnt = 1;
11513                 tp->napi[0].irq_vec = tp->pdev->irq;
11514         }
11515
11516         if (tp->irq_cnt == 1) {
11517                 tp->txq_cnt = 1;
11518                 tp->rxq_cnt = 1;
11519                 netif_set_real_num_tx_queues(tp->dev, 1);
11520                 netif_set_real_num_rx_queues(tp->dev, 1);
11521         }
11522 }
11523
11524 static void tg3_ints_fini(struct tg3 *tp)
11525 {
11526         if (tg3_flag(tp, USING_MSIX))
11527                 pci_disable_msix(tp->pdev);
11528         else if (tg3_flag(tp, USING_MSI))
11529                 pci_disable_msi(tp->pdev);
11530         tg3_flag_clear(tp, USING_MSI);
11531         tg3_flag_clear(tp, USING_MSIX);
11532         tg3_flag_clear(tp, ENABLE_RSS);
11533         tg3_flag_clear(tp, ENABLE_TSS);
11534 }
11535
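      /* Bring the chip fully up: set up interrupt vectors, allocate rings
       * and NAPI contexts, request IRQs, program the hardware, and start the
       * TX queues.  Shared by tg3_open() and paths that need a full restart,
       * such as a channel-count change.
       */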
11536 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11537                      bool init)
11538 {
11539         struct net_device *dev = tp->dev;
11540         int i, err;
11541
11542         /*
11543          * Set up interrupts first so we know how
11544          * many NAPI resources to allocate.
11545          */
11546         tg3_ints_init(tp);
11547
11548         tg3_rss_check_indir_tbl(tp);
11549
11550         /* The placement of this call is tied
11551          * to the setup and use of Host TX descriptors.
11552          */
11553         err = tg3_alloc_consistent(tp);
11554         if (err)
11555                 goto out_ints_fini;
11556
11557         tg3_napi_init(tp);
11558
11559         tg3_napi_enable(tp);
11560
11561         for (i = 0; i < tp->irq_cnt; i++) {
11562                 err = tg3_request_irq(tp, i);
11563                 if (err) {
11564                         for (i--; i >= 0; i--) {
11565                                 struct tg3_napi *tnapi = &tp->napi[i];
11566
11567                                 free_irq(tnapi->irq_vec, tnapi);
11568                         }
11569                         goto out_napi_fini;
11570                 }
11571         }
11572
11573         tg3_full_lock(tp, 0);
11574
11575         if (init)
11576                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11577
11578         err = tg3_init_hw(tp, reset_phy);
11579         if (err) {
11580                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11581                 tg3_free_rings(tp);
11582         }
11583
11584         tg3_full_unlock(tp);
11585
11586         if (err)
11587                 goto out_free_irq;
11588
11589         if (test_irq && tg3_flag(tp, USING_MSI)) {
11590                 err = tg3_test_msi(tp);
11591
11592                 if (err) {
11593                         tg3_full_lock(tp, 0);
11594                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11595                         tg3_free_rings(tp);
11596                         tg3_full_unlock(tp);
11597
11598                         goto out_napi_fini;
11599                 }
11600
11601                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11602                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11603
11604                         tw32(PCIE_TRANSACTION_CFG,
11605                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11606                 }
11607         }
11608
11609         tg3_phy_start(tp);
11610
11611         tg3_hwmon_open(tp);
11612
11613         tg3_full_lock(tp, 0);
11614
11615         tg3_timer_start(tp);
11616         tg3_flag_set(tp, INIT_COMPLETE);
11617         tg3_enable_ints(tp);
11618
11619         tg3_ptp_resume(tp);
11620
11621         tg3_full_unlock(tp);
11622
11623         netif_tx_start_all_queues(dev);
11624
11625         /*
11626          * Reset the loopback feature if it was turned on while the device
11627          * was down; make sure that it's installed properly now.
11628          */
11629         if (dev->features & NETIF_F_LOOPBACK)
11630                 tg3_set_loopback(dev, dev->features);
11631
11632         return 0;
11633
11634 out_free_irq:
11635         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11636                 struct tg3_napi *tnapi = &tp->napi[i];
11637                 free_irq(tnapi->irq_vec, tnapi);
11638         }
11639
11640 out_napi_fini:
11641         tg3_napi_disable(tp);
11642         tg3_napi_fini(tp);
11643         tg3_free_consistent(tp);
11644
11645 out_ints_fini:
11646         tg3_ints_fini(tp);
11647
11648         return err;
11649 }
11650
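      /* Inverse of tg3_start(): halt the hardware, then release IRQs, NAPI
       * contexts, and DMA memory.
       */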
11651 static void tg3_stop(struct tg3 *tp)
11652 {
11653         int i;
11654
11655         tg3_reset_task_cancel(tp);
11656         tg3_netif_stop(tp);
11657
11658         tg3_timer_stop(tp);
11659
11660         tg3_hwmon_close(tp);
11661
11662         tg3_phy_stop(tp);
11663
11664         tg3_full_lock(tp, 1);
11665
11666         tg3_disable_ints(tp);
11667
11668         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11669         tg3_free_rings(tp);
11670         tg3_flag_clear(tp, INIT_COMPLETE);
11671
11672         tg3_full_unlock(tp);
11673
11674         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11675                 struct tg3_napi *tnapi = &tp->napi[i];
11676                 free_irq(tnapi->irq_vec, tnapi);
11677         }
11678
11679         tg3_ints_fini(tp);
11680
11681         tg3_napi_fini(tp);
11682
11683         tg3_free_consistent(tp);
11684 }
11685
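      /* .ndo_open handler; runs when the interface is brought up, e.g. via
       * "ip link set dev eth0 up" (device name illustrative).
       */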
11686 static int tg3_open(struct net_device *dev)
11687 {
11688         struct tg3 *tp = netdev_priv(dev);
11689         int err;
11690
11691         if (tp->pcierr_recovery) {
11692                 netdev_err(dev, "Failed to open device. PCI error recovery "
11693                            "in progress\n");
11694                 return -EAGAIN;
11695         }
11696
11697         if (tp->fw_needed) {
11698                 err = tg3_request_firmware(tp);
11699                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11700                         if (err) {
11701                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11702                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11703                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11704                                 netdev_warn(tp->dev, "EEE capability restored\n");
11705                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11706                         }
11707                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11708                         if (err)
11709                                 return err;
11710                 } else if (err) {
11711                         netdev_warn(tp->dev, "TSO capability disabled\n");
11712                         tg3_flag_clear(tp, TSO_CAPABLE);
11713                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11714                         netdev_notice(tp->dev, "TSO capability restored\n");
11715                         tg3_flag_set(tp, TSO_CAPABLE);
11716                 }
11717         }
11718
11719         tg3_carrier_off(tp);
11720
11721         err = tg3_power_up(tp);
11722         if (err)
11723                 return err;
11724
11725         tg3_full_lock(tp, 0);
11726
11727         tg3_disable_ints(tp);
11728         tg3_flag_clear(tp, INIT_COMPLETE);
11729
11730         tg3_full_unlock(tp);
11731
11732         err = tg3_start(tp,
11733                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11734                         true, true);
11735         if (err) {
11736                 tg3_frob_aux_power(tp, false);
11737                 pci_set_power_state(tp->pdev, PCI_D3hot);
11738         }
11739
11740         return err;
11741 }
11742
11743 static int tg3_close(struct net_device *dev)
11744 {
11745         struct tg3 *tp = netdev_priv(dev);
11746
11747         if (tp->pcierr_recovery) {
11748                 netdev_err(dev, "Failed to close device. PCI error recovery "
11749                            "in progress\n");
11750                 return -EAGAIN;
11751         }
11752
11753         tg3_stop(tp);
11754
11755         if (pci_device_is_present(tp->pdev)) {
11756                 tg3_power_down_prepare(tp);
11757
11758                 tg3_carrier_off(tp);
11759         }
11760         return 0;
11761 }
11762
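      /* Hardware statistics counters are 64 bits wide, stored as two 32-bit
       * halves.
       */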
11763 static inline u64 get_stat64(tg3_stat64_t *val)
11764 {
11765         return ((u64)val->high << 32) | ((u64)val->low);
11766 }
11767
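      /* On 5700/5701 copper devices the CRC error count is read from a PHY
       * test register (enabled on first use) rather than from the MAC
       * statistics block, and is accumulated in software.
       */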
11768 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11769 {
11770         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11771
11772         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11773             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11774              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11775                 u32 val;
11776
11777                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11778                         tg3_writephy(tp, MII_TG3_TEST1,
11779                                      val | MII_TG3_TEST1_CRC_EN);
11780                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11781                 } else
11782                         val = 0;
11783
11784                 tp->phy_crc_errors += val;
11785
11786                 return tp->phy_crc_errors;
11787         }
11788
11789         return get_stat64(&hw_stats->rx_fcs_errors);
11790 }
11791
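      /* Hardware counters reset along with the chip, so each ethtool
       * statistic is the pre-reset snapshot plus the live counter.  For
       * example, ESTAT_ADD(rx_octets) expands to:
       *
       *   estats->rx_octets = old_estats->rx_octets +
       *                       get_stat64(&hw_stats->rx_octets);
       */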
11792 #define ESTAT_ADD(member) \
11793         estats->member =        old_estats->member + \
11794                                 get_stat64(&hw_stats->member)
11795
11796 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11797 {
11798         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11799         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11800
11801         ESTAT_ADD(rx_octets);
11802         ESTAT_ADD(rx_fragments);
11803         ESTAT_ADD(rx_ucast_packets);
11804         ESTAT_ADD(rx_mcast_packets);
11805         ESTAT_ADD(rx_bcast_packets);
11806         ESTAT_ADD(rx_fcs_errors);
11807         ESTAT_ADD(rx_align_errors);
11808         ESTAT_ADD(rx_xon_pause_rcvd);
11809         ESTAT_ADD(rx_xoff_pause_rcvd);
11810         ESTAT_ADD(rx_mac_ctrl_rcvd);
11811         ESTAT_ADD(rx_xoff_entered);
11812         ESTAT_ADD(rx_frame_too_long_errors);
11813         ESTAT_ADD(rx_jabbers);
11814         ESTAT_ADD(rx_undersize_packets);
11815         ESTAT_ADD(rx_in_length_errors);
11816         ESTAT_ADD(rx_out_length_errors);
11817         ESTAT_ADD(rx_64_or_less_octet_packets);
11818         ESTAT_ADD(rx_65_to_127_octet_packets);
11819         ESTAT_ADD(rx_128_to_255_octet_packets);
11820         ESTAT_ADD(rx_256_to_511_octet_packets);
11821         ESTAT_ADD(rx_512_to_1023_octet_packets);
11822         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11823         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11824         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11825         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11826         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11827
11828         ESTAT_ADD(tx_octets);
11829         ESTAT_ADD(tx_collisions);
11830         ESTAT_ADD(tx_xon_sent);
11831         ESTAT_ADD(tx_xoff_sent);
11832         ESTAT_ADD(tx_flow_control);
11833         ESTAT_ADD(tx_mac_errors);
11834         ESTAT_ADD(tx_single_collisions);
11835         ESTAT_ADD(tx_mult_collisions);
11836         ESTAT_ADD(tx_deferred);
11837         ESTAT_ADD(tx_excessive_collisions);
11838         ESTAT_ADD(tx_late_collisions);
11839         ESTAT_ADD(tx_collide_2times);
11840         ESTAT_ADD(tx_collide_3times);
11841         ESTAT_ADD(tx_collide_4times);
11842         ESTAT_ADD(tx_collide_5times);
11843         ESTAT_ADD(tx_collide_6times);
11844         ESTAT_ADD(tx_collide_7times);
11845         ESTAT_ADD(tx_collide_8times);
11846         ESTAT_ADD(tx_collide_9times);
11847         ESTAT_ADD(tx_collide_10times);
11848         ESTAT_ADD(tx_collide_11times);
11849         ESTAT_ADD(tx_collide_12times);
11850         ESTAT_ADD(tx_collide_13times);
11851         ESTAT_ADD(tx_collide_14times);
11852         ESTAT_ADD(tx_collide_15times);
11853         ESTAT_ADD(tx_ucast_packets);
11854         ESTAT_ADD(tx_mcast_packets);
11855         ESTAT_ADD(tx_bcast_packets);
11856         ESTAT_ADD(tx_carrier_sense_errors);
11857         ESTAT_ADD(tx_discards);
11858         ESTAT_ADD(tx_errors);
11859
11860         ESTAT_ADD(dma_writeq_full);
11861         ESTAT_ADD(dma_write_prioq_full);
11862         ESTAT_ADD(rxbds_empty);
11863         ESTAT_ADD(rx_discards);
11864         ESTAT_ADD(rx_errors);
11865         ESTAT_ADD(rx_threshold_hit);
11866
11867         ESTAT_ADD(dma_readq_full);
11868         ESTAT_ADD(dma_read_prioq_full);
11869         ESTAT_ADD(tx_comp_queue_full);
11870
11871         ESTAT_ADD(ring_set_send_prod_index);
11872         ESTAT_ADD(ring_status_update);
11873         ESTAT_ADD(nic_irqs);
11874         ESTAT_ADD(nic_avoided_irqs);
11875         ESTAT_ADD(nic_tx_threshold_hit);
11876
11877         ESTAT_ADD(mbuf_lwm_thresh_hit);
11878 }
11879
11880 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11881 {
11882         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11883         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11884
11885         stats->rx_packets = old_stats->rx_packets +
11886                 get_stat64(&hw_stats->rx_ucast_packets) +
11887                 get_stat64(&hw_stats->rx_mcast_packets) +
11888                 get_stat64(&hw_stats->rx_bcast_packets);
11889
11890         stats->tx_packets = old_stats->tx_packets +
11891                 get_stat64(&hw_stats->tx_ucast_packets) +
11892                 get_stat64(&hw_stats->tx_mcast_packets) +
11893                 get_stat64(&hw_stats->tx_bcast_packets);
11894
11895         stats->rx_bytes = old_stats->rx_bytes +
11896                 get_stat64(&hw_stats->rx_octets);
11897         stats->tx_bytes = old_stats->tx_bytes +
11898                 get_stat64(&hw_stats->tx_octets);
11899
11900         stats->rx_errors = old_stats->rx_errors +
11901                 get_stat64(&hw_stats->rx_errors);
11902         stats->tx_errors = old_stats->tx_errors +
11903                 get_stat64(&hw_stats->tx_errors) +
11904                 get_stat64(&hw_stats->tx_mac_errors) +
11905                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11906                 get_stat64(&hw_stats->tx_discards);
11907
11908         stats->multicast = old_stats->multicast +
11909                 get_stat64(&hw_stats->rx_mcast_packets);
11910         stats->collisions = old_stats->collisions +
11911                 get_stat64(&hw_stats->tx_collisions);
11912
11913         stats->rx_length_errors = old_stats->rx_length_errors +
11914                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11915                 get_stat64(&hw_stats->rx_undersize_packets);
11916
11917         stats->rx_frame_errors = old_stats->rx_frame_errors +
11918                 get_stat64(&hw_stats->rx_align_errors);
11919         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11920                 get_stat64(&hw_stats->tx_discards);
11921         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11922                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11923
11924         stats->rx_crc_errors = old_stats->rx_crc_errors +
11925                 tg3_calc_crc_errors(tp);
11926
11927         stats->rx_missed_errors = old_stats->rx_missed_errors +
11928                 get_stat64(&hw_stats->rx_discards);
11929
11930         stats->rx_dropped = tp->rx_dropped;
11931         stats->tx_dropped = tp->tx_dropped;
11932 }
11933
11934 static int tg3_get_regs_len(struct net_device *dev)
11935 {
11936         return TG3_REG_BLK_SIZE;
11937 }
11938
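      /* Register dump for "ethtool -d eth0" (device name illustrative).
       * Yields an all-zero block while the PHY is in low-power mode.
       */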
11939 static void tg3_get_regs(struct net_device *dev,
11940                 struct ethtool_regs *regs, void *_p)
11941 {
11942         struct tg3 *tp = netdev_priv(dev);
11943
11944         regs->version = 0;
11945
11946         memset(_p, 0, TG3_REG_BLK_SIZE);
11947
11948         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11949                 return;
11950
11951         tg3_full_lock(tp, 0);
11952
11953         tg3_dump_legacy_regs(tp, (u32 *)_p);
11954
11955         tg3_full_unlock(tp);
11956 }
11957
11958 static int tg3_get_eeprom_len(struct net_device *dev)
11959 {
11960         struct tg3 *tp = netdev_priv(dev);
11961
11962         return tp->nvram_size;
11963 }
11964
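      /* NVRAM read for "ethtool -e eth0 offset N length N" (arguments
       * illustrative).  Data is fetched 32 bits at a time; unaligned head
       * and tail bytes are handled separately.
       */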
11965 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11966 {
11967         struct tg3 *tp = netdev_priv(dev);
11968         int ret, cpmu_restore = 0;
11969         u8  *pd;
11970         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11971         __be32 val;
11972
11973         if (tg3_flag(tp, NO_NVRAM))
11974                 return -EINVAL;
11975
11976         offset = eeprom->offset;
11977         len = eeprom->len;
11978         eeprom->len = 0;
11979
11980         eeprom->magic = TG3_EEPROM_MAGIC;
11981
11982         /* Override clock, link aware and link idle modes */
11983         if (tg3_flag(tp, CPMU_PRESENT)) {
11984                 cpmu_val = tr32(TG3_CPMU_CTRL);
11985                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11986                                 CPMU_CTRL_LINK_IDLE_MODE)) {
11987                         tw32(TG3_CPMU_CTRL, cpmu_val &
11988                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
11989                                              CPMU_CTRL_LINK_IDLE_MODE));
11990                         cpmu_restore = 1;
11991                 }
11992         }
11993         tg3_override_clk(tp);
11994
11995         if (offset & 3) {
11996                 /* adjustments to start on required 4 byte boundary */
11997                 b_offset = offset & 3;
11998                 b_count = 4 - b_offset;
11999                 if (b_count > len) {
12000                         /* i.e. offset=1 len=2 */
12001                         b_count = len;
12002                 }
12003                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12004                 if (ret)
12005                         goto eeprom_done;
12006                 memcpy(data, ((char *)&val) + b_offset, b_count);
12007                 len -= b_count;
12008                 offset += b_count;
12009                 eeprom->len += b_count;
12010         }
12011
12012         /* read bytes up to the last 4 byte boundary */
12013         pd = &data[eeprom->len];
12014         for (i = 0; i < (len - (len & 3)); i += 4) {
12015                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12016                 if (ret) {
12017                         if (i)
12018                                 i -= 4;
12019                         eeprom->len += i;
12020                         goto eeprom_done;
12021                 }
12022                 memcpy(pd + i, &val, 4);
12023                 if (need_resched()) {
12024                         if (signal_pending(current)) {
12025                                 eeprom->len += i;
12026                                 ret = -EINTR;
12027                                 goto eeprom_done;
12028                         }
12029                         cond_resched();
12030                 }
12031         }
12032         eeprom->len += i;
12033
12034         if (len & 3) {
12035                 /* read last bytes not ending on 4 byte boundary */
12036                 pd = &data[eeprom->len];
12037                 b_count = len & 3;
12038                 b_offset = offset + len - b_count;
12039                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12040                 if (ret)
12041                         goto eeprom_done;
12042                 memcpy(pd, &val, b_count);
12043                 eeprom->len += b_count;
12044         }
12045         ret = 0;
12046
12047 eeprom_done:
12048         /* Restore clock, link aware and link idle modes */
12049         tg3_restore_clk(tp);
12050         if (cpmu_restore)
12051                 tw32(TG3_CPMU_CTRL, cpmu_val);
12052
12053         return ret;
12054 }
12055
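      /* NVRAM write for "ethtool -E".  Userspace must supply a magic value
       * matching TG3_EEPROM_MAGIC or the request is rejected; unaligned head
       * and tail bytes are read-modify-written through a bounce buffer.
       */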
12056 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12057 {
12058         struct tg3 *tp = netdev_priv(dev);
12059         int ret;
12060         u32 offset, len, b_offset, odd_len;
12061         u8 *buf;
12062         __be32 start = 0, end;
12063
12064         if (tg3_flag(tp, NO_NVRAM) ||
12065             eeprom->magic != TG3_EEPROM_MAGIC)
12066                 return -EINVAL;
12067
12068         offset = eeprom->offset;
12069         len = eeprom->len;
12070
12071         if ((b_offset = (offset & 3))) {
12072                 /* adjustments to start on required 4 byte boundary */
12073                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12074                 if (ret)
12075                         return ret;
12076                 len += b_offset;
12077                 offset &= ~3;
12078                 if (len < 4)
12079                         len = 4;
12080         }
12081
12082         odd_len = 0;
12083         if (len & 3) {
12084                 /* adjustments to end on required 4 byte boundary */
12085                 odd_len = 1;
12086                 len = (len + 3) & ~3;
12087                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12088                 if (ret)
12089                         return ret;
12090         }
12091
12092         buf = data;
12093         if (b_offset || odd_len) {
12094                 buf = kmalloc(len, GFP_KERNEL);
12095                 if (!buf)
12096                         return -ENOMEM;
12097                 if (b_offset)
12098                         memcpy(buf, &start, 4);
12099                 if (odd_len)
12100                         memcpy(buf+len-4, &end, 4);
12101                 memcpy(buf + b_offset, data, eeprom->len);
12102         }
12103
12104         ret = tg3_nvram_write_block(tp, offset, len, buf);
12105
12106         if (buf != data)
12107                 kfree(buf);
12108
12109         return ret;
12110 }
12111
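      /* Link settings query ("ethtool eth0").  With phylib attached the
       * query is delegated to the PHY device; otherwise the answer is
       * assembled from tp->link_config.
       */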
12112 static int tg3_get_link_ksettings(struct net_device *dev,
12113                                   struct ethtool_link_ksettings *cmd)
12114 {
12115         struct tg3 *tp = netdev_priv(dev);
12116         u32 supported, advertising;
12117
12118         if (tg3_flag(tp, USE_PHYLIB)) {
12119                 struct phy_device *phydev;
12120                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12121                         return -EAGAIN;
12122                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12123                 phy_ethtool_ksettings_get(phydev, cmd);
12124
12125                 return 0;
12126         }
12127
12128         supported = (SUPPORTED_Autoneg);
12129
12130         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12131                 supported |= (SUPPORTED_1000baseT_Half |
12132                               SUPPORTED_1000baseT_Full);
12133
12134         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12135                 supported |= (SUPPORTED_100baseT_Half |
12136                               SUPPORTED_100baseT_Full |
12137                               SUPPORTED_10baseT_Half |
12138                               SUPPORTED_10baseT_Full |
12139                               SUPPORTED_TP);
12140                 cmd->base.port = PORT_TP;
12141         } else {
12142                 supported |= SUPPORTED_FIBRE;
12143                 cmd->base.port = PORT_FIBRE;
12144         }
12145         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12146                                                 supported);
12147
12148         advertising = tp->link_config.advertising;
12149         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12150                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12151                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12152                                 advertising |= ADVERTISED_Pause;
12153                         } else {
12154                                 advertising |= ADVERTISED_Pause |
12155                                         ADVERTISED_Asym_Pause;
12156                         }
12157                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12158                         advertising |= ADVERTISED_Asym_Pause;
12159                 }
12160         }
12161         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12162                                                 advertising);
12163
12164         if (netif_running(dev) && tp->link_up) {
12165                 cmd->base.speed = tp->link_config.active_speed;
12166                 cmd->base.duplex = tp->link_config.active_duplex;
12167                 ethtool_convert_legacy_u32_to_link_mode(
12168                         cmd->link_modes.lp_advertising,
12169                         tp->link_config.rmt_adv);
12170
12171                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12172                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12173                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12174                         else
12175                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12176                 }
12177         } else {
12178                 cmd->base.speed = SPEED_UNKNOWN;
12179                 cmd->base.duplex = DUPLEX_UNKNOWN;
12180                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12181         }
12182         cmd->base.phy_address = tp->phy_addr;
12183         cmd->base.autoneg = tp->link_config.autoneg;
12184         return 0;
12185 }
12186
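      /* Link settings change, e.g. "ethtool -s eth0 speed 100 duplex full
       * autoneg off" (an illustrative forced-mode example).
       */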
12187 static int tg3_set_link_ksettings(struct net_device *dev,
12188                                   const struct ethtool_link_ksettings *cmd)
12189 {
12190         struct tg3 *tp = netdev_priv(dev);
12191         u32 speed = cmd->base.speed;
12192         u32 advertising;
12193
12194         if (tg3_flag(tp, USE_PHYLIB)) {
12195                 struct phy_device *phydev;
12196                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12197                         return -EAGAIN;
12198                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12199                 return phy_ethtool_ksettings_set(phydev, cmd);
12200         }
12201
12202         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12203             cmd->base.autoneg != AUTONEG_DISABLE)
12204                 return -EINVAL;
12205
12206         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12207             cmd->base.duplex != DUPLEX_FULL &&
12208             cmd->base.duplex != DUPLEX_HALF)
12209                 return -EINVAL;
12210
12211         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12212                                                 cmd->link_modes.advertising);
12213
12214         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12215                 u32 mask = ADVERTISED_Autoneg |
12216                            ADVERTISED_Pause |
12217                            ADVERTISED_Asym_Pause;
12218
12219                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12220                         mask |= ADVERTISED_1000baseT_Half |
12221                                 ADVERTISED_1000baseT_Full;
12222
12223                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12224                         mask |= ADVERTISED_100baseT_Half |
12225                                 ADVERTISED_100baseT_Full |
12226                                 ADVERTISED_10baseT_Half |
12227                                 ADVERTISED_10baseT_Full |
12228                                 ADVERTISED_TP;
12229                 else
12230                         mask |= ADVERTISED_FIBRE;
12231
12232                 if (advertising & ~mask)
12233                         return -EINVAL;
12234
12235                 mask &= (ADVERTISED_1000baseT_Half |
12236                          ADVERTISED_1000baseT_Full |
12237                          ADVERTISED_100baseT_Half |
12238                          ADVERTISED_100baseT_Full |
12239                          ADVERTISED_10baseT_Half |
12240                          ADVERTISED_10baseT_Full);
12241
12242                 advertising &= mask;
12243         } else {
12244                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12245                         if (speed != SPEED_1000)
12246                                 return -EINVAL;
12247
12248                         if (cmd->base.duplex != DUPLEX_FULL)
12249                                 return -EINVAL;
12250                 } else {
12251                         if (speed != SPEED_100 &&
12252                             speed != SPEED_10)
12253                                 return -EINVAL;
12254                 }
12255         }
12256
12257         tg3_full_lock(tp, 0);
12258
12259         tp->link_config.autoneg = cmd->base.autoneg;
12260         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12261                 tp->link_config.advertising = (advertising |
12262                                               ADVERTISED_Autoneg);
12263                 tp->link_config.speed = SPEED_UNKNOWN;
12264                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12265         } else {
12266                 tp->link_config.advertising = 0;
12267                 tp->link_config.speed = speed;
12268                 tp->link_config.duplex = cmd->base.duplex;
12269         }
12270
12271         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12272
12273         tg3_warn_mgmt_link_flap(tp);
12274
12275         if (netif_running(dev))
12276                 tg3_setup_phy(tp, true);
12277
12278         tg3_full_unlock(tp);
12279
12280         return 0;
12281 }
12282
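      /* Driver and firmware identification ("ethtool -i eth0"). */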
12283 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12284 {
12285         struct tg3 *tp = netdev_priv(dev);
12286
12287         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12288         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12289         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12290         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12291 }
12292
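      /* Wake-on-LAN query/set ("ethtool eth0" / "ethtool -s eth0 wol g").
       * Only magic-packet wake (WAKE_MAGIC) is supported.
       */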
12293 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12294 {
12295         struct tg3 *tp = netdev_priv(dev);
12296
12297         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12298                 wol->supported = WAKE_MAGIC;
12299         else
12300                 wol->supported = 0;
12301         wol->wolopts = 0;
12302         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12303                 wol->wolopts = WAKE_MAGIC;
12304         memset(&wol->sopass, 0, sizeof(wol->sopass));
12305 }
12306
12307 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12308 {
12309         struct tg3 *tp = netdev_priv(dev);
12310         struct device *dp = &tp->pdev->dev;
12311
12312         if (wol->wolopts & ~WAKE_MAGIC)
12313                 return -EINVAL;
12314         if ((wol->wolopts & WAKE_MAGIC) &&
12315             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12316                 return -EINVAL;
12317
12318         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12319
12320         if (device_may_wakeup(dp))
12321                 tg3_flag_set(tp, WOL_ENABLE);
12322         else
12323                 tg3_flag_clear(tp, WOL_ENABLE);
12324
12325         return 0;
12326 }
12327
12328 static u32 tg3_get_msglevel(struct net_device *dev)
12329 {
12330         struct tg3 *tp = netdev_priv(dev);
12331         return tp->msg_enable;
12332 }
12333
12334 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12335 {
12336         struct tg3 *tp = netdev_priv(dev);
12337         tp->msg_enable = value;
12338 }
12339
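      /* Restart autonegotiation ("ethtool -r eth0"). */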
12340 static int tg3_nway_reset(struct net_device *dev)
12341 {
12342         struct tg3 *tp = netdev_priv(dev);
12343         int r;
12344
12345         if (!netif_running(dev))
12346                 return -EAGAIN;
12347
12348         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12349                 return -EINVAL;
12350
12351         tg3_warn_mgmt_link_flap(tp);
12352
12353         if (tg3_flag(tp, USE_PHYLIB)) {
12354                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12355                         return -EAGAIN;
12356                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12357         } else {
12358                 u32 bmcr;
12359
12360                 spin_lock_bh(&tp->lock);
12361                 r = -EINVAL;
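                      /* The first read is discarded; only the second read's
                       * return status is checked (apparently to flush a
                       * stale value).
                       */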
12362                 tg3_readphy(tp, MII_BMCR, &bmcr);
12363                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12364                     ((bmcr & BMCR_ANENABLE) ||
12365                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12366                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12367                                                    BMCR_ANENABLE);
12368                         r = 0;
12369                 }
12370                 spin_unlock_bh(&tp->lock);
12371         }
12372
12373         return r;
12374 }
12375
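      /* Ring size query/set ("ethtool -g eth0" / "ethtool -G eth0 rx 511",
       * values illustrative).  Resizing a running interface requires a full
       * hardware restart.
       */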
12376 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12377 {
12378         struct tg3 *tp = netdev_priv(dev);
12379
12380         ering->rx_max_pending = tp->rx_std_ring_mask;
12381         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12382                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12383         else
12384                 ering->rx_jumbo_max_pending = 0;
12385
12386         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12387
12388         ering->rx_pending = tp->rx_pending;
12389         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12390                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12391         else
12392                 ering->rx_jumbo_pending = 0;
12393
12394         ering->tx_pending = tp->napi[0].tx_pending;
12395 }
12396
12397 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12398 {
12399         struct tg3 *tp = netdev_priv(dev);
12400         int i, irq_sync = 0, err = 0;
12401
12402         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12403             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12404             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12405             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12406             (tg3_flag(tp, TSO_BUG) &&
12407              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12408                 return -EINVAL;
12409
12410         if (netif_running(dev)) {
12411                 tg3_phy_stop(tp);
12412                 tg3_netif_stop(tp);
12413                 irq_sync = 1;
12414         }
12415
12416         tg3_full_lock(tp, irq_sync);
12417
12418         tp->rx_pending = ering->rx_pending;
12419
12420         if (tg3_flag(tp, MAX_RXPEND_64) &&
12421             tp->rx_pending > 63)
12422                 tp->rx_pending = 63;
12423
12424         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12425                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12426
12427         for (i = 0; i < tp->irq_max; i++)
12428                 tp->napi[i].tx_pending = ering->tx_pending;
12429
12430         if (netif_running(dev)) {
12431                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12432                 err = tg3_restart_hw(tp, false);
12433                 if (!err)
12434                         tg3_netif_start(tp);
12435         }
12436
12437         tg3_full_unlock(tp);
12438
12439         if (irq_sync && !err)
12440                 tg3_phy_start(tp);
12441
12442         return err;
12443 }
12444
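      /* Flow control query/set ("ethtool -a eth0" /
       * "ethtool -A eth0 rx on tx on autoneg on", values illustrative).
       */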
12445 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12446 {
12447         struct tg3 *tp = netdev_priv(dev);
12448
12449         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12450
12451         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12452                 epause->rx_pause = 1;
12453         else
12454                 epause->rx_pause = 0;
12455
12456         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12457                 epause->tx_pause = 1;
12458         else
12459                 epause->tx_pause = 0;
12460 }
12461
12462 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12463 {
12464         struct tg3 *tp = netdev_priv(dev);
12465         int err = 0;
12466
12467         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12468                 tg3_warn_mgmt_link_flap(tp);
12469
12470         if (tg3_flag(tp, USE_PHYLIB)) {
12471                 u32 newadv;
12472                 struct phy_device *phydev;
12473
12474                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12475
12476                 if (!(phydev->supported & SUPPORTED_Pause) ||
12477                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12478                      (epause->rx_pause != epause->tx_pause)))
12479                         return -EINVAL;
12480
12481                 tp->link_config.flowctrl = 0;
12482                 if (epause->rx_pause) {
12483                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12484
12485                         if (epause->tx_pause) {
12486                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12487                                 newadv = ADVERTISED_Pause;
12488                         } else
12489                                 newadv = ADVERTISED_Pause |
12490                                          ADVERTISED_Asym_Pause;
12491                 } else if (epause->tx_pause) {
12492                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12493                         newadv = ADVERTISED_Asym_Pause;
12494                 } else
12495                         newadv = 0;
12496
12497                 if (epause->autoneg)
12498                         tg3_flag_set(tp, PAUSE_AUTONEG);
12499                 else
12500                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12501
12502                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12503                         u32 oldadv = phydev->advertising &
12504                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12505                         if (oldadv != newadv) {
12506                                 phydev->advertising &=
12507                                         ~(ADVERTISED_Pause |
12508                                           ADVERTISED_Asym_Pause);
12509                                 phydev->advertising |= newadv;
12510                                 if (phydev->autoneg) {
12511                                         /*
12512                                          * Always renegotiate the link to
12513                                          * inform our link partner of our
12514                                          * flow control settings, even if the
12515                                          * flow control is forced.  Let
12516                                          * tg3_adjust_link() do the final
12517                                          * flow control setup.
12518                                          */
12519                                         return phy_start_aneg(phydev);
12520                                 }
12521                         }
12522
12523                         if (!epause->autoneg)
12524                                 tg3_setup_flow_control(tp, 0, 0);
12525                 } else {
12526                         tp->link_config.advertising &=
12527                                         ~(ADVERTISED_Pause |
12528                                           ADVERTISED_Asym_Pause);
12529                         tp->link_config.advertising |= newadv;
12530                 }
12531         } else {
12532                 int irq_sync = 0;
12533
12534                 if (netif_running(dev)) {
12535                         tg3_netif_stop(tp);
12536                         irq_sync = 1;
12537                 }
12538
12539                 tg3_full_lock(tp, irq_sync);
12540
12541                 if (epause->autoneg)
12542                         tg3_flag_set(tp, PAUSE_AUTONEG);
12543                 else
12544                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12545                 if (epause->rx_pause)
12546                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12547                 else
12548                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12549                 if (epause->tx_pause)
12550                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12551                 else
12552                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12553
12554                 if (netif_running(dev)) {
12555                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12556                         err = tg3_restart_hw(tp, false);
12557                         if (!err)
12558                                 tg3_netif_start(tp);
12559                 }
12560
12561                 tg3_full_unlock(tp);
12562         }
12563
12564         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12565
12566         return err;
12567 }
12568
12569 static int tg3_get_sset_count(struct net_device *dev, int sset)
12570 {
12571         switch (sset) {
12572         case ETH_SS_TEST:
12573                 return TG3_NUM_TEST;
12574         case ETH_SS_STATS:
12575                 return TG3_NUM_STATS;
12576         default:
12577                 return -EOPNOTSUPP;
12578         }
12579 }
12580
12581 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12582                          u32 *rules __always_unused)
12583 {
12584         struct tg3 *tp = netdev_priv(dev);
12585
12586         if (!tg3_flag(tp, SUPPORT_MSIX))
12587                 return -EOPNOTSUPP;
12588
12589         switch (info->cmd) {
12590         case ETHTOOL_GRXRINGS:
12591                 if (netif_running(tp->dev))
12592                         info->data = tp->rxq_cnt;
12593                 else {
12594                         info->data = num_online_cpus();
12595                         if (info->data > TG3_RSS_MAX_NUM_QS)
12596                                 info->data = TG3_RSS_MAX_NUM_QS;
12597                 }
12598
12599                 return 0;
12600
12601         default:
12602                 return -EOPNOTSUPP;
12603         }
12604 }
12605
12606 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12607 {
12608         u32 size = 0;
12609         struct tg3 *tp = netdev_priv(dev);
12610
12611         if (tg3_flag(tp, SUPPORT_MSIX))
12612                 size = TG3_RSS_INDIR_TBL_SIZE;
12613
12614         return size;
12615 }
12616
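      /* RSS indirection table query/set ("ethtool -x eth0" /
       * "ethtool -X eth0 equal 4", values illustrative).  Only the Toeplitz
       * hash is exposed, and the hash key cannot be changed.
       */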
12617 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12618 {
12619         struct tg3 *tp = netdev_priv(dev);
12620         int i;
12621
12622         if (hfunc)
12623                 *hfunc = ETH_RSS_HASH_TOP;
12624         if (!indir)
12625                 return 0;
12626
12627         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12628                 indir[i] = tp->rss_ind_tbl[i];
12629
12630         return 0;
12631 }
12632
12633 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12634                         const u8 hfunc)
12635 {
12636         struct tg3 *tp = netdev_priv(dev);
12637         size_t i;
12638
12639         /* We require at least one supported parameter to be changed, and no
12640          * change to any of the unsupported parameters.
12641          */
12642         if (key ||
12643             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12644                 return -EOPNOTSUPP;
12645
12646         if (!indir)
12647                 return 0;
12648
12649         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12650                 tp->rss_ind_tbl[i] = indir[i];
12651
12652         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12653                 return 0;
12654
12655         /* It is legal to write the indirection
12656          * table while the device is running.
12657          */
12658         tg3_full_lock(tp, 0);
12659         tg3_rss_write_indir_tbl(tp);
12660         tg3_full_unlock(tp);
12661
12662         return 0;
12663 }
12664
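      /* Queue count query/set ("ethtool -l eth0" / "ethtool -L eth0 rx 4",
       * values illustrative).  Changing counts on a running interface
       * restarts the device via tg3_stop()/tg3_start().
       */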
12665 static void tg3_get_channels(struct net_device *dev,
12666                              struct ethtool_channels *channel)
12667 {
12668         struct tg3 *tp = netdev_priv(dev);
12669         u32 deflt_qs = netif_get_num_default_rss_queues();
12670
12671         channel->max_rx = tp->rxq_max;
12672         channel->max_tx = tp->txq_max;
12673
12674         if (netif_running(dev)) {
12675                 channel->rx_count = tp->rxq_cnt;
12676                 channel->tx_count = tp->txq_cnt;
12677         } else {
12678                 if (tp->rxq_req)
12679                         channel->rx_count = tp->rxq_req;
12680                 else
12681                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12682
12683                 if (tp->txq_req)
12684                         channel->tx_count = tp->txq_req;
12685                 else
12686                         channel->tx_count = min(deflt_qs, tp->txq_max);
12687         }
12688 }
12689
12690 static int tg3_set_channels(struct net_device *dev,
12691                             struct ethtool_channels *channel)
12692 {
12693         struct tg3 *tp = netdev_priv(dev);
12694
12695         if (!tg3_flag(tp, SUPPORT_MSIX))
12696                 return -EOPNOTSUPP;
12697
12698         if (channel->rx_count > tp->rxq_max ||
12699             channel->tx_count > tp->txq_max)
12700                 return -EINVAL;
12701
12702         tp->rxq_req = channel->rx_count;
12703         tp->txq_req = channel->tx_count;
12704
12705         if (!netif_running(dev))
12706                 return 0;
12707
12708         tg3_stop(tp);
12709
12710         tg3_carrier_off(tp);
12711
12712         tg3_start(tp, true, false, false);
12713
12714         return 0;
12715 }
12716
12717 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12718 {
12719         switch (stringset) {
12720         case ETH_SS_STATS:
12721                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12722                 break;
12723         case ETH_SS_TEST:
12724                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12725                 break;
12726         default:
12727                 WARN_ON(1);     /* we need a WARN() */
12728                 break;
12729         }
12730 }
12731
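      /* Port identification ("ethtool -p eth0 5" blinks the LEDs for five
       * seconds; values illustrative).
       */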
12732 static int tg3_set_phys_id(struct net_device *dev,
12733                             enum ethtool_phys_id_state state)
12734 {
12735         struct tg3 *tp = netdev_priv(dev);
12736
12737         if (!netif_running(tp->dev))
12738                 return -EAGAIN;
12739
12740         switch (state) {
12741         case ETHTOOL_ID_ACTIVE:
12742                 return 1;       /* cycle on/off once per second */
12743
12744         case ETHTOOL_ID_ON:
12745                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12746                      LED_CTRL_1000MBPS_ON |
12747                      LED_CTRL_100MBPS_ON |
12748                      LED_CTRL_10MBPS_ON |
12749                      LED_CTRL_TRAFFIC_OVERRIDE |
12750                      LED_CTRL_TRAFFIC_BLINK |
12751                      LED_CTRL_TRAFFIC_LED);
12752                 break;
12753
12754         case ETHTOOL_ID_OFF:
12755                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12756                      LED_CTRL_TRAFFIC_OVERRIDE);
12757                 break;
12758
12759         case ETHTOOL_ID_INACTIVE:
12760                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12761                 break;
12762         }
12763
12764         return 0;
12765 }
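
/* Usage sketch (hypothetical eth0): tg3_set_phys_id() backs "ethtool -p".
 * Blinking the port LED for ten seconds:
 *
 *	ethtool -p eth0 10
 *
 * Returning 1 from the ETHTOOL_ID_ACTIVE case asks the ethtool core to
 * call back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per second.
 */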
12766
12767 static void tg3_get_ethtool_stats(struct net_device *dev,
12768                                    struct ethtool_stats *estats, u64 *tmp_stats)
12769 {
12770         struct tg3 *tp = netdev_priv(dev);
12771
12772         if (tp->hw_stats)
12773                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12774         else
12775                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12776 }
12777
12778 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12779 {
12780         int i;
12781         __be32 *buf;
12782         u32 offset = 0, len = 0;
12783         u32 magic, val;
12784
12785         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12786                 return NULL;
12787
12788         if (magic == TG3_EEPROM_MAGIC) {
12789                 for (offset = TG3_NVM_DIR_START;
12790                      offset < TG3_NVM_DIR_END;
12791                      offset += TG3_NVM_DIRENT_SIZE) {
12792                         if (tg3_nvram_read(tp, offset, &val))
12793                                 return NULL;
12794
12795                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12796                             TG3_NVM_DIRTYPE_EXTVPD)
12797                                 break;
12798                 }
12799
12800                 if (offset != TG3_NVM_DIR_END) {
12801                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12802                         if (tg3_nvram_read(tp, offset + 4, &offset))
12803                                 return NULL;
12804
12805                         offset = tg3_nvram_logical_addr(tp, offset);
12806                 }
12807         }
12808
12809         if (!offset || !len) {
12810                 offset = TG3_NVM_VPD_OFF;
12811                 len = TG3_NVM_VPD_LEN;
12812         }
12813
12814         buf = kmalloc(len, GFP_KERNEL);
12815         if (buf == NULL)
12816                 return NULL;
12817
12818         if (magic == TG3_EEPROM_MAGIC) {
12819                 for (i = 0; i < len; i += 4) {
12820                         /* The data is in little-endian format in NVRAM.
12821                          * Use the big-endian read routines to preserve
12822                          * the byte order as it exists in NVRAM.
12823                          */
12824                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12825                                 goto error;
12826                 }
12827         } else {
12828                 u8 *ptr;
12829                 ssize_t cnt;
12830                 unsigned int pos = 0;
12831
12832                 ptr = (u8 *)&buf[0];
12833                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12834                         cnt = pci_read_vpd(tp->pdev, pos,
12835                                            len - pos, ptr);
12836                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12837                                 cnt = 0;
12838                         else if (cnt < 0)
12839                                 goto error;
12840                 }
12841                 if (pos != len)
12842                         goto error;
12843         }
12844
12845         *vpdlen = len;
12846
12847         return buf;
12848
12849 error:
12850         kfree(buf);
12851         return NULL;
12852 }
12853
12854 #define NVRAM_TEST_SIZE 0x100
12855 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12856 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12857 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12858 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12859 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12860 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12861 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12862 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12863
12864 static int tg3_test_nvram(struct tg3 *tp)
12865 {
12866         u32 csum, magic, len;
12867         __be32 *buf;
12868         int i, j, k, err = 0, size;
12869
12870         if (tg3_flag(tp, NO_NVRAM))
12871                 return 0;
12872
12873         if (tg3_nvram_read(tp, 0, &magic) != 0)
12874                 return -EIO;
12875
12876         if (magic == TG3_EEPROM_MAGIC)
12877                 size = NVRAM_TEST_SIZE;
12878         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12879                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12880                     TG3_EEPROM_SB_FORMAT_1) {
12881                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12882                         case TG3_EEPROM_SB_REVISION_0:
12883                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12884                                 break;
12885                         case TG3_EEPROM_SB_REVISION_2:
12886                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12887                                 break;
12888                         case TG3_EEPROM_SB_REVISION_3:
12889                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12890                                 break;
12891                         case TG3_EEPROM_SB_REVISION_4:
12892                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12893                                 break;
12894                         case TG3_EEPROM_SB_REVISION_5:
12895                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12896                                 break;
12897                         case TG3_EEPROM_SB_REVISION_6:
12898                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12899                                 break;
12900                         default:
12901                                 return -EIO;
12902                         }
12903                 } else
12904                         return 0;
12905         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12906                 size = NVRAM_SELFBOOT_HW_SIZE;
12907         else
12908                 return -EIO;
12909
12910         buf = kmalloc(size, GFP_KERNEL);
12911         if (buf == NULL)
12912                 return -ENOMEM;
12913
12914         err = -EIO;
12915         for (i = 0, j = 0; i < size; i += 4, j++) {
12916                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12917                 if (err)
12918                         break;
12919         }
12920         if (i < size)
12921                 goto out;
12922
12923         /* Selfboot format */
12924         magic = be32_to_cpu(buf[0]);
12925         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12926             TG3_EEPROM_MAGIC_FW) {
12927                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12928
12929                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12930                     TG3_EEPROM_SB_REVISION_2) {
12931                         /* For rev 2, the csum doesn't cover the Multi-Boot Agent (MBA) word. */
12932                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12933                                 csum8 += buf8[i];
12934                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12935                                 csum8 += buf8[i];
12936                 } else {
12937                         for (i = 0; i < size; i++)
12938                                 csum8 += buf8[i];
12939                 }
12940
12941                 if (csum8 == 0) {
12942                         err = 0;
12943                         goto out;
12944                 }
12945
12946                 err = -EIO;
12947                 goto out;
12948         }
12949
12950         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12951             TG3_EEPROM_MAGIC_HW) {
12952                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12953                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12954                 u8 *buf8 = (u8 *) buf;
12955
12956                 /* Separate the parity bits and the data bytes.  */
12957                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12958                         if ((i == 0) || (i == 8)) {
12959                                 int l;
12960                                 u8 msk;
12961
12962                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12963                                         parity[k++] = buf8[i] & msk;
12964                                 i++;
12965                         } else if (i == 16) {
12966                                 int l;
12967                                 u8 msk;
12968
12969                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12970                                         parity[k++] = buf8[i] & msk;
12971                                 i++;
12972
12973                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12974                                         parity[k++] = buf8[i] & msk;
12975                                 i++;
12976                         }
12977                         data[j++] = buf8[i];
12978                 }
12979
12980                 err = -EIO;
12981                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12982                         u8 hw8 = hweight8(data[i]);
12983
12984                         if ((hw8 & 0x1) && parity[i])
12985                                 goto out;
12986                         else if (!(hw8 & 0x1) && !parity[i])
12987                                 goto out;
12988                 }
12989                 err = 0;
12990                 goto out;
12991         }
12992
12993         err = -EIO;
12994
12995         /* Bootstrap checksum at offset 0x10 */
12996         csum = calc_crc((unsigned char *) buf, 0x10);
12997         if (csum != le32_to_cpu(buf[0x10/4]))
12998                 goto out;
12999
13000         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13001         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13002         if (csum != le32_to_cpu(buf[0xfc/4]))
13003                 goto out;
13004
13005         kfree(buf);
13006
13007         buf = tg3_vpd_readblock(tp, &len);
13008         if (!buf)
13009                 return -ENOMEM;
13010
13011         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13012         if (i > 0) {
13013                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13014                 if (j < 0)
13015                         goto out;
13016
13017                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13018                         goto out;
13019
13020                 i += PCI_VPD_LRDT_TAG_SIZE;
13021                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13022                                               PCI_VPD_RO_KEYWORD_CHKSUM);
13023                 if (j > 0) {
13024                         u8 csum8 = 0;
13025
13026                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
13027
13028                         for (i = 0; i <= j; i++)
13029                                 csum8 += ((u8 *)buf)[i];
13030
13031                         if (csum8)
13032                                 goto out;
13033                 }
13034         }
13035
13036         err = 0;
13037
13038 out:
13039         kfree(buf);
13040         return err;
13041 }
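
/* Illustrative sketch (not driver code): the selfboot check above is a
 * plain 8-bit additive checksum -- all bytes of the image summed modulo
 * 256 must come out to zero.  A minimal stand-alone equivalent:
 *
 *	static u8 selfboot_csum8(const u8 *img, size_t size)
 *	{
 *		u8 sum = 0;
 *		size_t i;
 *
 *		for (i = 0; i < size; i++)
 *			sum += img[i];
 *		return sum;	// 0 means the checksum is good
 *	}
 *
 * Rev 2 images differ only in that the 4-byte MBA word at
 * TG3_EEPROM_SB_F1R2_MBA_OFF is excluded from the sum.
 */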
13042
13043 #define TG3_SERDES_TIMEOUT_SEC  2
13044 #define TG3_COPPER_TIMEOUT_SEC  6
13045
13046 static int tg3_test_link(struct tg3 *tp)
13047 {
13048         int i, max;
13049
13050         if (!netif_running(tp->dev))
13051                 return -ENODEV;
13052
13053         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13054                 max = TG3_SERDES_TIMEOUT_SEC;
13055         else
13056                 max = TG3_COPPER_TIMEOUT_SEC;
13057
13058         for (i = 0; i < max; i++) {
13059                 if (tp->link_up)
13060                         return 0;
13061
13062                 if (msleep_interruptible(1000))
13063                         break;
13064         }
13065
13066         return -EIO;
13067 }
13068
13069 /* Only test the commonly used registers */
13070 static int tg3_test_registers(struct tg3 *tp)
13071 {
13072         int i, is_5705, is_5750;
13073         u32 offset, read_mask, write_mask, val, save_val, read_val;
13074         static struct {
13075                 u16 offset;
13076                 u16 flags;
13077 #define TG3_FL_5705     0x1
13078 #define TG3_FL_NOT_5705 0x2
13079 #define TG3_FL_NOT_5788 0x4
13080 #define TG3_FL_NOT_5750 0x8
13081                 u32 read_mask;
13082                 u32 write_mask;
13083         } reg_tbl[] = {
13084                 /* MAC Control Registers */
13085                 { MAC_MODE, TG3_FL_NOT_5705,
13086                         0x00000000, 0x00ef6f8c },
13087                 { MAC_MODE, TG3_FL_5705,
13088                         0x00000000, 0x01ef6b8c },
13089                 { MAC_STATUS, TG3_FL_NOT_5705,
13090                         0x03800107, 0x00000000 },
13091                 { MAC_STATUS, TG3_FL_5705,
13092                         0x03800100, 0x00000000 },
13093                 { MAC_ADDR_0_HIGH, 0x0000,
13094                         0x00000000, 0x0000ffff },
13095                 { MAC_ADDR_0_LOW, 0x0000,
13096                         0x00000000, 0xffffffff },
13097                 { MAC_RX_MTU_SIZE, 0x0000,
13098                         0x00000000, 0x0000ffff },
13099                 { MAC_TX_MODE, 0x0000,
13100                         0x00000000, 0x00000070 },
13101                 { MAC_TX_LENGTHS, 0x0000,
13102                         0x00000000, 0x00003fff },
13103                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13104                         0x00000000, 0x000007fc },
13105                 { MAC_RX_MODE, TG3_FL_5705,
13106                         0x00000000, 0x000007dc },
13107                 { MAC_HASH_REG_0, 0x0000,
13108                         0x00000000, 0xffffffff },
13109                 { MAC_HASH_REG_1, 0x0000,
13110                         0x00000000, 0xffffffff },
13111                 { MAC_HASH_REG_2, 0x0000,
13112                         0x00000000, 0xffffffff },
13113                 { MAC_HASH_REG_3, 0x0000,
13114                         0x00000000, 0xffffffff },
13115
13116                 /* Receive Data and Receive BD Initiator Control Registers. */
13117                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13118                         0x00000000, 0xffffffff },
13119                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13120                         0x00000000, 0xffffffff },
13121                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13122                         0x00000000, 0x00000003 },
13123                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13124                         0x00000000, 0xffffffff },
13125                 { RCVDBDI_STD_BD+0, 0x0000,
13126                         0x00000000, 0xffffffff },
13127                 { RCVDBDI_STD_BD+4, 0x0000,
13128                         0x00000000, 0xffffffff },
13129                 { RCVDBDI_STD_BD+8, 0x0000,
13130                         0x00000000, 0xffff0002 },
13131                 { RCVDBDI_STD_BD+0xc, 0x0000,
13132                         0x00000000, 0xffffffff },
13133
13134                 /* Receive BD Initiator Control Registers. */
13135                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13136                         0x00000000, 0xffffffff },
13137                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13138                         0x00000000, 0x000003ff },
13139                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13140                         0x00000000, 0xffffffff },
13141
13142                 /* Host Coalescing Control Registers. */
13143                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13144                         0x00000000, 0x00000004 },
13145                 { HOSTCC_MODE, TG3_FL_5705,
13146                         0x00000000, 0x000000f6 },
13147                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13148                         0x00000000, 0xffffffff },
13149                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13150                         0x00000000, 0x000003ff },
13151                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13152                         0x00000000, 0xffffffff },
13153                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13154                         0x00000000, 0x000003ff },
13155                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13156                         0x00000000, 0xffffffff },
13157                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13158                         0x00000000, 0x000000ff },
13159                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13160                         0x00000000, 0xffffffff },
13161                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13162                         0x00000000, 0x000000ff },
13163                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13164                         0x00000000, 0xffffffff },
13165                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13166                         0x00000000, 0xffffffff },
13167                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13168                         0x00000000, 0xffffffff },
13169                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13170                         0x00000000, 0x000000ff },
13171                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13172                         0x00000000, 0xffffffff },
13173                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13174                         0x00000000, 0x000000ff },
13175                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13176                         0x00000000, 0xffffffff },
13177                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13178                         0x00000000, 0xffffffff },
13179                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13180                         0x00000000, 0xffffffff },
13181                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13182                         0x00000000, 0xffffffff },
13183                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13184                         0x00000000, 0xffffffff },
13185                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13186                         0xffffffff, 0x00000000 },
13187                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13188                         0xffffffff, 0x00000000 },
13189
13190                 /* Buffer Manager Control Registers. */
13191                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13192                         0x00000000, 0x007fff80 },
13193                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13194                         0x00000000, 0x007fffff },
13195                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13196                         0x00000000, 0x0000003f },
13197                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13198                         0x00000000, 0x000001ff },
13199                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13200                         0x00000000, 0x000001ff },
13201                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13202                         0xffffffff, 0x00000000 },
13203                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13204                         0xffffffff, 0x00000000 },
13205
13206                 /* Mailbox Registers */
13207                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13208                         0x00000000, 0x000001ff },
13209                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13210                         0x00000000, 0x000001ff },
13211                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13212                         0x00000000, 0x000007ff },
13213                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13214                         0x00000000, 0x000001ff },
13215
13216                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13217         };
13218
13219         is_5705 = is_5750 = 0;
13220         if (tg3_flag(tp, 5705_PLUS)) {
13221                 is_5705 = 1;
13222                 if (tg3_flag(tp, 5750_PLUS))
13223                         is_5750 = 1;
13224         }
13225
13226         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13227                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13228                         continue;
13229
13230                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13231                         continue;
13232
13233                 if (tg3_flag(tp, IS_5788) &&
13234                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13235                         continue;
13236
13237                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13238                         continue;
13239
13240                 offset = (u32) reg_tbl[i].offset;
13241                 read_mask = reg_tbl[i].read_mask;
13242                 write_mask = reg_tbl[i].write_mask;
13243
13244                 /* Save the original register content */
13245                 save_val = tr32(offset);
13246
13247                 /* Determine the read-only value. */
13248                 read_val = save_val & read_mask;
13249
13250                 /* Write zero to the register, then make sure the read-only bits
13251                  * are not changed and the read/write bits are all zeros.
13252                  */
13253                 tw32(offset, 0);
13254
13255                 val = tr32(offset);
13256
13257                 /* Test the read-only and read/write bits. */
13258                 if (((val & read_mask) != read_val) || (val & write_mask))
13259                         goto out;
13260
13261                 /* Write ones to all the bits defined by RdMask and WrMask, then
13262                  * make sure the read-only bits are not changed and the
13263                  * read/write bits are all ones.
13264                  */
13265                 tw32(offset, read_mask | write_mask);
13266
13267                 val = tr32(offset);
13268
13269                 /* Test the read-only bits. */
13270                 if ((val & read_mask) != read_val)
13271                         goto out;
13272
13273                 /* Test the read/write bits. */
13274                 if ((val & write_mask) != write_mask)
13275                         goto out;
13276
13277                 tw32(offset, save_val);
13278         }
13279
13280         return 0;
13281
13282 out:
13283         if (netif_msg_hw(tp))
13284                 netdev_err(tp->dev,
13285                            "Register test failed at offset %x\n", offset);
13286         tw32(offset, save_val);
13287         return -EIO;
13288 }
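
/* Worked example of the mask scheme above: the 5705 MAC_MODE entry
 * { MAC_MODE, TG3_FL_5705, 0x00000000, 0x01ef6b8c } has read_mask 0, so
 * no bits are treated as read-only; the test merely checks that every
 * bit in write_mask 0x01ef6b8c reads back 0 after writing zero and 1
 * after writing the mask.  Conversely, HOSTCC_STATS_BLK_NIC_ADDR uses
 * read_mask 0xffffffff / write_mask 0, i.e. the register must survive
 * both writes completely unchanged.
 */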
13289
13290 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13291 {
13292         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13293         int i;
13294         u32 j;
13295
13296         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13297                 for (j = 0; j < len; j += 4) {
13298                         u32 val;
13299
13300                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13301                         tg3_read_mem(tp, offset + j, &val);
13302                         if (val != test_pattern[i])
13303                                 return -EIO;
13304                 }
13305         }
13306         return 0;
13307 }
13308
13309 static int tg3_test_memory(struct tg3 *tp)
13310 {
13311         static struct mem_entry {
13312                 u32 offset;
13313                 u32 len;
13314         } mem_tbl_570x[] = {
13315                 { 0x00000000, 0x00b50},
13316                 { 0x00002000, 0x1c000},
13317                 { 0xffffffff, 0x00000}
13318         }, mem_tbl_5705[] = {
13319                 { 0x00000100, 0x0000c},
13320                 { 0x00000200, 0x00008},
13321                 { 0x00004000, 0x00800},
13322                 { 0x00006000, 0x01000},
13323                 { 0x00008000, 0x02000},
13324                 { 0x00010000, 0x0e000},
13325                 { 0xffffffff, 0x00000}
13326         }, mem_tbl_5755[] = {
13327                 { 0x00000200, 0x00008},
13328                 { 0x00004000, 0x00800},
13329                 { 0x00006000, 0x00800},
13330                 { 0x00008000, 0x02000},
13331                 { 0x00010000, 0x0c000},
13332                 { 0xffffffff, 0x00000}
13333         }, mem_tbl_5906[] = {
13334                 { 0x00000200, 0x00008},
13335                 { 0x00004000, 0x00400},
13336                 { 0x00006000, 0x00400},
13337                 { 0x00008000, 0x01000},
13338                 { 0x00010000, 0x01000},
13339                 { 0xffffffff, 0x00000}
13340         }, mem_tbl_5717[] = {
13341                 { 0x00000200, 0x00008},
13342                 { 0x00010000, 0x0a000},
13343                 { 0x00020000, 0x13c00},
13344                 { 0xffffffff, 0x00000}
13345         }, mem_tbl_57765[] = {
13346                 { 0x00000200, 0x00008},
13347                 { 0x00004000, 0x00800},
13348                 { 0x00006000, 0x09800},
13349                 { 0x00010000, 0x0a000},
13350                 { 0xffffffff, 0x00000}
13351         };
13352         struct mem_entry *mem_tbl;
13353         int err = 0;
13354         int i;
13355
13356         if (tg3_flag(tp, 5717_PLUS))
13357                 mem_tbl = mem_tbl_5717;
13358         else if (tg3_flag(tp, 57765_CLASS) ||
13359                  tg3_asic_rev(tp) == ASIC_REV_5762)
13360                 mem_tbl = mem_tbl_57765;
13361         else if (tg3_flag(tp, 5755_PLUS))
13362                 mem_tbl = mem_tbl_5755;
13363         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13364                 mem_tbl = mem_tbl_5906;
13365         else if (tg3_flag(tp, 5705_PLUS))
13366                 mem_tbl = mem_tbl_5705;
13367         else
13368                 mem_tbl = mem_tbl_570x;
13369
13370         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13371                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13372                 if (err)
13373                         break;
13374         }
13375
13376         return err;
13377 }
13378
13379 #define TG3_TSO_MSS             500
13380
13381 #define TG3_TSO_IP_HDR_LEN      20
13382 #define TG3_TSO_TCP_HDR_LEN     20
13383 #define TG3_TSO_TCP_OPT_LEN     12
13384
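/* For the reader's convenience, the canned TSO frame below decodes as
 * follows (the array starts at the ethertype; MAC addresses are filled
 * in by tg3_run_loopback()):
 *   08 00        - ethertype IPv4
 *   45 ...       - IPv4, IHL 5 (20 bytes); tot_len is patched at runtime
 *   40 00        - DF set, fragment offset 0
 *   40 06        - TTL 64, protocol TCP (IP checksum left zero)
 *   10.0.0.1 -> 10.0.0.2 source/destination addresses
 *   TCP: data offset 8 (20-byte header + 12 bytes of options: NOP, NOP,
 *   10-byte timestamp), ACK flag, checksum 0x1409.
 */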
13385 static const u8 tg3_tso_header[] = {
13386 0x08, 0x00,
13387 0x45, 0x00, 0x00, 0x00,
13388 0x00, 0x00, 0x40, 0x00,
13389 0x40, 0x06, 0x00, 0x00,
13390 0x0a, 0x00, 0x00, 0x01,
13391 0x0a, 0x00, 0x00, 0x02,
13392 0x0d, 0x00, 0xe0, 0x00,
13393 0x00, 0x00, 0x01, 0x00,
13394 0x00, 0x00, 0x02, 0x00,
13395 0x80, 0x10, 0x10, 0x00,
13396 0x14, 0x09, 0x00, 0x00,
13397 0x01, 0x01, 0x08, 0x0a,
13398 0x11, 0x11, 0x11, 0x11,
13399 0x11, 0x11, 0x11, 0x11,
13400 };
13401
13402 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13403 {
13404         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13405         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13406         u32 budget;
13407         struct sk_buff *skb;
13408         u8 *tx_data, *rx_data;
13409         dma_addr_t map;
13410         int num_pkts, tx_len, rx_len, i, err;
13411         struct tg3_rx_buffer_desc *desc;
13412         struct tg3_napi *tnapi, *rnapi;
13413         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13414
13415         tnapi = &tp->napi[0];
13416         rnapi = &tp->napi[0];
13417         if (tp->irq_cnt > 1) {
13418                 if (tg3_flag(tp, ENABLE_RSS))
13419                         rnapi = &tp->napi[1];
13420                 if (tg3_flag(tp, ENABLE_TSS))
13421                         tnapi = &tp->napi[1];
13422         }
13423         coal_now = tnapi->coal_now | rnapi->coal_now;
13424
13425         err = -EIO;
13426
13427         tx_len = pktsz;
13428         skb = netdev_alloc_skb(tp->dev, tx_len);
13429         if (!skb)
13430                 return -ENOMEM;
13431
13432         tx_data = skb_put(skb, tx_len);
13433         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13434         memset(tx_data + ETH_ALEN, 0x0, 8);
13435
13436         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13437
13438         if (tso_loopback) {
13439                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13440
13441                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13442                               TG3_TSO_TCP_OPT_LEN;
13443
13444                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13445                        sizeof(tg3_tso_header));
13446                 mss = TG3_TSO_MSS;
13447
13448                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13449                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13450
13451                 /* Set the total length field in the IP header */
13452                 iph->tot_len = htons((u16)(mss + hdr_len));
13453
13454                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13455                               TXD_FLAG_CPU_POST_DMA);
13456
13457                 if (tg3_flag(tp, HW_TSO_1) ||
13458                     tg3_flag(tp, HW_TSO_2) ||
13459                     tg3_flag(tp, HW_TSO_3)) {
13460                         struct tcphdr *th;
13461                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13462                         th = (struct tcphdr *)&tx_data[val];
13463                         th->check = 0;
13464                 } else
13465                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13466
13467                 if (tg3_flag(tp, HW_TSO_3)) {
13468                         mss |= (hdr_len & 0xc) << 12;
13469                         if (hdr_len & 0x10)
13470                                 base_flags |= 0x00000010;
13471                         base_flags |= (hdr_len & 0x3e0) << 5;
13472                 } else if (tg3_flag(tp, HW_TSO_2))
13473                         mss |= hdr_len << 9;
13474                 else if (tg3_flag(tp, HW_TSO_1) ||
13475                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13476                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13477                 } else {
13478                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13479                 }
13480
13481                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13482         } else {
13483                 num_pkts = 1;
13484                 data_off = ETH_HLEN;
13485
13486                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13487                     tx_len > VLAN_ETH_FRAME_LEN)
13488                         base_flags |= TXD_FLAG_JMB_PKT;
13489         }
13490
13491         for (i = data_off; i < tx_len; i++)
13492                 tx_data[i] = (u8) (i & 0xff);
13493
13494         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13495         if (pci_dma_mapping_error(tp->pdev, map)) {
13496                 dev_kfree_skb(skb);
13497                 return -EIO;
13498         }
13499
13500         val = tnapi->tx_prod;
13501         tnapi->tx_buffers[val].skb = skb;
13502         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13503
13504         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13505                rnapi->coal_now);
13506
13507         udelay(10);
13508
13509         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13510
13511         budget = tg3_tx_avail(tnapi);
13512         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13513                             base_flags | TXD_FLAG_END, mss, 0)) {
13514                 tnapi->tx_buffers[val].skb = NULL;
13515                 dev_kfree_skb(skb);
13516                 return -EIO;
13517         }
13518
13519         tnapi->tx_prod++;
13520
13521         /* Sync BD data before updating mailbox */
13522         wmb();
13523
13524         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13525         tr32_mailbox(tnapi->prodmbox);
13526
13527         udelay(10);
13528
13529         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13530         for (i = 0; i < 35; i++) {
13531                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13532                        coal_now);
13533
13534                 udelay(10);
13535
13536                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13537                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13538                 if ((tx_idx == tnapi->tx_prod) &&
13539                     (rx_idx == (rx_start_idx + num_pkts)))
13540                         break;
13541         }
13542
13543         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13544         dev_kfree_skb(skb);
13545
13546         if (tx_idx != tnapi->tx_prod)
13547                 goto out;
13548
13549         if (rx_idx != rx_start_idx + num_pkts)
13550                 goto out;
13551
13552         val = data_off;
13553         while (rx_idx != rx_start_idx) {
13554                 desc = &rnapi->rx_rcb[rx_start_idx++];
13555                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13556                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13557
13558                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13559                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13560                         goto out;
13561
13562                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13563                          - ETH_FCS_LEN;
13564
13565                 if (!tso_loopback) {
13566                         if (rx_len != tx_len)
13567                                 goto out;
13568
13569                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13570                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13571                                         goto out;
13572                         } else {
13573                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13574                                         goto out;
13575                         }
13576                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13577                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13578                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13579                         goto out;
13580                 }
13581
13582                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13583                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13584                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13585                                              mapping);
13586                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13587                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13588                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13589                                              mapping);
13590                 } else
13591                         goto out;
13592
13593                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13594                                             PCI_DMA_FROMDEVICE);
13595
13596                 rx_data += TG3_RX_OFFSET(tp);
13597                 for (i = data_off; i < rx_len; i++, val++) {
13598                         if (*(rx_data + i) != (u8) (val & 0xff))
13599                                 goto out;
13600                 }
13601         }
13602
13603         err = 0;
13604
13605         /* tg3_free_rings will unmap and free the rx_data */
13606 out:
13607         return err;
13608 }
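
/* Summary of tg3_run_loopback(): build one test frame (optionally a
 * fake TSO super-frame based on tg3_tso_header), post it on the TX
 * ring, kick the coalescing mailbox, then poll the status block until
 * the TX consumer and RX producer indices show the frame(s) completed
 * the loop, and finally byte-compare the received payload against the
 * pattern written at data_off.
 */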
13609
13610 #define TG3_STD_LOOPBACK_FAILED         1
13611 #define TG3_JMB_LOOPBACK_FAILED         2
13612 #define TG3_TSO_LOOPBACK_FAILED         4
13613 #define TG3_LOOPBACK_FAILED \
13614         (TG3_STD_LOOPBACK_FAILED | \
13615          TG3_JMB_LOOPBACK_FAILED | \
13616          TG3_TSO_LOOPBACK_FAILED)
13617
13618 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13619 {
13620         int err = -EIO;
13621         u32 eee_cap;
13622         u32 jmb_pkt_sz = 9000;
13623
13624         if (tp->dma_limit)
13625                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13626
13627         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13628         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13629
13630         if (!netif_running(tp->dev)) {
13631                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13632                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13633                 if (do_extlpbk)
13634                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13635                 goto done;
13636         }
13637
13638         err = tg3_reset_hw(tp, true);
13639         if (err) {
13640                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13641                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13642                 if (do_extlpbk)
13643                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13644                 goto done;
13645         }
13646
13647         if (tg3_flag(tp, ENABLE_RSS)) {
13648                 int i;
13649
13650                 /* Reroute all rx packets to the 1st queue */
13651                 for (i = MAC_RSS_INDIR_TBL_0;
13652                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13653                         tw32(i, 0x0);
13654         }
13655
13656         /* HW errata - MAC loopback fails in some cases on 5780.
13657          * Normal traffic and PHY loopback are not affected by this
13658          * errata.  Also, the MAC loopback test is deprecated for
13659          * all newer ASIC revisions.
13660          */
13661         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13662             !tg3_flag(tp, CPMU_PRESENT)) {
13663                 tg3_mac_loopback(tp, true);
13664
13665                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13666                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13667
13668                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13669                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13670                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13671
13672                 tg3_mac_loopback(tp, false);
13673         }
13674
13675         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13676             !tg3_flag(tp, USE_PHYLIB)) {
13677                 int i;
13678
13679                 tg3_phy_lpbk_set(tp, 0, false);
13680
13681                 /* Wait for link */
13682                 for (i = 0; i < 100; i++) {
13683                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13684                                 break;
13685                         mdelay(1);
13686                 }
13687
13688                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13689                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13690                 if (tg3_flag(tp, TSO_CAPABLE) &&
13691                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13692                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13693                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13694                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13695                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13696
13697                 if (do_extlpbk) {
13698                         tg3_phy_lpbk_set(tp, 0, true);
13699
13700                         /* All link indications report up, but the hardware
13701                          * isn't really ready for about 20 msec.  Double it
13702                          * to be sure.
13703                          */
13704                         mdelay(40);
13705
13706                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13707                                 data[TG3_EXT_LOOPB_TEST] |=
13708                                                         TG3_STD_LOOPBACK_FAILED;
13709                         if (tg3_flag(tp, TSO_CAPABLE) &&
13710                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13711                                 data[TG3_EXT_LOOPB_TEST] |=
13712                                                         TG3_TSO_LOOPBACK_FAILED;
13713                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13714                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13715                                 data[TG3_EXT_LOOPB_TEST] |=
13716                                                         TG3_JMB_LOOPBACK_FAILED;
13717                 }
13718
13719                 /* Re-enable gphy autopowerdown. */
13720                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13721                         tg3_phy_toggle_apd(tp, true);
13722         }
13723
13724         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13725                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13726
13727 done:
13728         tp->phy_flags |= eee_cap;
13729
13730         return err;
13731 }
13732
13733 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13734                           u64 *data)
13735 {
13736         struct tg3 *tp = netdev_priv(dev);
13737         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13738
13739         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13740                 if (tg3_power_up(tp)) {
13741                         etest->flags |= ETH_TEST_FL_FAILED;
13742                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13743                         return;
13744                 }
13745                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13746         }
13747
13748         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13749
13750         if (tg3_test_nvram(tp) != 0) {
13751                 etest->flags |= ETH_TEST_FL_FAILED;
13752                 data[TG3_NVRAM_TEST] = 1;
13753         }
13754         if (!doextlpbk && tg3_test_link(tp)) {
13755                 etest->flags |= ETH_TEST_FL_FAILED;
13756                 data[TG3_LINK_TEST] = 1;
13757         }
13758         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13759                 int err, err2 = 0, irq_sync = 0;
13760
13761                 if (netif_running(dev)) {
13762                         tg3_phy_stop(tp);
13763                         tg3_netif_stop(tp);
13764                         irq_sync = 1;
13765                 }
13766
13767                 tg3_full_lock(tp, irq_sync);
13768                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13769                 err = tg3_nvram_lock(tp);
13770                 tg3_halt_cpu(tp, RX_CPU_BASE);
13771                 if (!tg3_flag(tp, 5705_PLUS))
13772                         tg3_halt_cpu(tp, TX_CPU_BASE);
13773                 if (!err)
13774                         tg3_nvram_unlock(tp);
13775
13776                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13777                         tg3_phy_reset(tp);
13778
13779                 if (tg3_test_registers(tp) != 0) {
13780                         etest->flags |= ETH_TEST_FL_FAILED;
13781                         data[TG3_REGISTER_TEST] = 1;
13782                 }
13783
13784                 if (tg3_test_memory(tp) != 0) {
13785                         etest->flags |= ETH_TEST_FL_FAILED;
13786                         data[TG3_MEMORY_TEST] = 1;
13787                 }
13788
13789                 if (doextlpbk)
13790                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13791
13792                 if (tg3_test_loopback(tp, data, doextlpbk))
13793                         etest->flags |= ETH_TEST_FL_FAILED;
13794
13795                 tg3_full_unlock(tp);
13796
13797                 if (tg3_test_interrupt(tp) != 0) {
13798                         etest->flags |= ETH_TEST_FL_FAILED;
13799                         data[TG3_INTERRUPT_TEST] = 1;
13800                 }
13801
13802                 tg3_full_lock(tp, 0);
13803
13804                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13805                 if (netif_running(dev)) {
13806                         tg3_flag_set(tp, INIT_COMPLETE);
13807                         err2 = tg3_restart_hw(tp, true);
13808                         if (!err2)
13809                                 tg3_netif_start(tp);
13810                 }
13811
13812                 tg3_full_unlock(tp);
13813
13814                 if (irq_sync && !err2)
13815                         tg3_phy_start(tp);
13816         }
13817         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13818                 tg3_power_down_prepare(tp);
13819
13820 }
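
/* Usage sketch (hypothetical eth0): tg3_self_test() backs "ethtool -t".
 * Running the full offline suite, or additionally the external-cable
 * loopback:
 *
 *	ethtool -t eth0 offline
 *	ethtool -t eth0 external_lb
 *
 * Online mode runs only the NVRAM and link tests; the register, memory,
 * loopback and interrupt tests require taking the device down.
 */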
13821
13822 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13823 {
13824         struct tg3 *tp = netdev_priv(dev);
13825         struct hwtstamp_config stmpconf;
13826
13827         if (!tg3_flag(tp, PTP_CAPABLE))
13828                 return -EOPNOTSUPP;
13829
13830         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13831                 return -EFAULT;
13832
13833         if (stmpconf.flags)
13834                 return -EINVAL;
13835
13836         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13837             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13838                 return -ERANGE;
13839
13840         switch (stmpconf.rx_filter) {
13841         case HWTSTAMP_FILTER_NONE:
13842                 tp->rxptpctl = 0;
13843                 break;
13844         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13845                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13846                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13847                 break;
13848         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13849                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13850                                TG3_RX_PTP_CTL_SYNC_EVNT;
13851                 break;
13852         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13853                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13854                                TG3_RX_PTP_CTL_DELAY_REQ;
13855                 break;
13856         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13857                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13858                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13859                 break;
13860         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13861                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13862                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13863                 break;
13864         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13865                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13866                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13867                 break;
13868         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13869                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13870                                TG3_RX_PTP_CTL_SYNC_EVNT;
13871                 break;
13872         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13873                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13874                                TG3_RX_PTP_CTL_SYNC_EVNT;
13875                 break;
13876         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13877                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13878                                TG3_RX_PTP_CTL_SYNC_EVNT;
13879                 break;
13880         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13881                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13882                                TG3_RX_PTP_CTL_DELAY_REQ;
13883                 break;
13884         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13885                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13886                                TG3_RX_PTP_CTL_DELAY_REQ;
13887                 break;
13888         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13889                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13890                                TG3_RX_PTP_CTL_DELAY_REQ;
13891                 break;
13892         default:
13893                 return -ERANGE;
13894         }
13895
13896         if (netif_running(dev) && tp->rxptpctl)
13897                 tw32(TG3_RX_PTP_CTL,
13898                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13899
13900         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13901                 tg3_flag_set(tp, TX_TSTAMP_EN);
13902         else
13903                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13904
13905         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13906                 -EFAULT : 0;
13907 }
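
/* User-space sketch (assumptions: interface "eth0", error handling
 * elided) of the SIOCSHWTSTAMP request this handler services --
 * enabling TX timestamps and timestamping all PTP v2 event packets:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On success the driver echoes the (possibly adjusted) config back.
 */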
13908
13909 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13910 {
13911         struct tg3 *tp = netdev_priv(dev);
13912         struct hwtstamp_config stmpconf;
13913
13914         if (!tg3_flag(tp, PTP_CAPABLE))
13915                 return -EOPNOTSUPP;
13916
13917         stmpconf.flags = 0;
13918         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13919                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13920
13921         switch (tp->rxptpctl) {
13922         case 0:
13923                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13924                 break;
13925         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13926                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13927                 break;
13928         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13929                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13930                 break;
13931         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13932                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13933                 break;
13934         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13935                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13936                 break;
13937         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13938                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13939                 break;
13940         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13941                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13942                 break;
13943         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13944                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13945                 break;
13946         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13947                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13948                 break;
13949         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13950                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13951                 break;
13952         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13953                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13954                 break;
13955         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13956                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13957                 break;
13958         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13959                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13960                 break;
13961         default:
13962                 WARN_ON_ONCE(1);
13963                 return -ERANGE;
13964         }
13965
13966         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13967                 -EFAULT : 0;
13968 }
13969
13970 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13971 {
13972         struct mii_ioctl_data *data = if_mii(ifr);
13973         struct tg3 *tp = netdev_priv(dev);
13974         int err;
13975
13976         if (tg3_flag(tp, USE_PHYLIB)) {
13977                 struct phy_device *phydev;
13978                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13979                         return -EAGAIN;
13980                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13981                 return phy_mii_ioctl(phydev, ifr, cmd);
13982         }
13983
13984         switch (cmd) {
13985         case SIOCGMIIPHY:
13986                 data->phy_id = tp->phy_addr;
13987
13988                 /* fallthru */
13989         case SIOCGMIIREG: {
13990                 u32 mii_regval;
13991
13992                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13993                         break;                  /* We have no PHY */
13994
13995                 if (!netif_running(dev))
13996                         return -EAGAIN;
13997
13998                 spin_lock_bh(&tp->lock);
13999                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14000                                     data->reg_num & 0x1f, &mii_regval);
14001                 spin_unlock_bh(&tp->lock);
14002
14003                 data->val_out = mii_regval;
14004
14005                 return err;
14006         }
14007
14008         case SIOCSMIIREG:
14009                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14010                         break;                  /* We have no PHY */
14011
14012                 if (!netif_running(dev))
14013                         return -EAGAIN;
14014
14015                 spin_lock_bh(&tp->lock);
14016                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14017                                      data->reg_num & 0x1f, data->val_in);
14018                 spin_unlock_bh(&tp->lock);
14019
14020                 return err;
14021
14022         case SIOCSHWTSTAMP:
14023                 return tg3_hwtstamp_set(dev, ifr);
14024
14025         case SIOCGHWTSTAMP:
14026                 return tg3_hwtstamp_get(dev, ifr);
14027
14028         default:
14029                 /* do nothing */
14030                 break;
14031         }
14032         return -EOPNOTSUPP;
14033 }
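
/* User-space sketch (hypothetical "eth0", error handling elided) of the
 * SIOCGMIIPHY/SIOCGMIIREG path above -- reading the PHY's BMSR
 * (register 1), in the classic mii-tool style:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);
 *	// link status bits are now in mii->val_out
 */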
14034
14035 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14036 {
14037         struct tg3 *tp = netdev_priv(dev);
14038
14039         memcpy(ec, &tp->coal, sizeof(*ec));
14040         return 0;
14041 }
14042
14043 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14044 {
14045         struct tg3 *tp = netdev_priv(dev);
14046         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14047         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14048
14049         if (!tg3_flag(tp, 5705_PLUS)) {
14050                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14051                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14052                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14053                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14054         }
14055
14056         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14057             (!ec->rx_coalesce_usecs) ||
14058             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14059             (!ec->tx_coalesce_usecs) ||
14060             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14061             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14062             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14063             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14064             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14065             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14066             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14067             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14068                 return -EINVAL;
14069
14070         /* Only copy relevant parameters, ignore all others. */
14071         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14072         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14073         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14074         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14075         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14076         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14077         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14078         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14079         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14080
14081         if (netif_running(dev)) {
14082                 tg3_full_lock(tp, 0);
14083                 __tg3_set_coalesce(tp, &tp->coal);
14084                 tg3_full_unlock(tp);
14085         }
14086         return 0;
14087 }
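
/* These two handlers implement the standard ethtool coalescing hooks; a
 * typical invocation (interface name is illustrative) would be:
 *
 *     ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * which is range-checked and applied by tg3_set_coalesce() above.
 */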
14088
14089 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14090 {
14091         struct tg3 *tp = netdev_priv(dev);
14092
14093         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14094                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14095                 return -EOPNOTSUPP;
14096         }
14097
14098         if (edata->advertised != tp->eee.advertised) {
14099                 netdev_warn(tp->dev,
14100                             "Direct manipulation of EEE advertisement is not supported\n");
14101                 return -EINVAL;
14102         }
14103
14104         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14105                 netdev_warn(tp->dev,
14106                             "Maximum supported Tx LPI timer is %#x\n",
14107                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14108                 return -EINVAL;
14109         }
14110
14111         tp->eee = *edata;
14112
14113         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14114         tg3_warn_mgmt_link_flap(tp);
14115
14116         if (netif_running(tp->dev)) {
14117                 tg3_full_lock(tp, 0);
14118                 tg3_setup_eee(tp);
14119                 tg3_phy_reset(tp);
14120                 tg3_full_unlock(tp);
14121         }
14122
14123         return 0;
14124 }
14125
14126 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14127 {
14128         struct tg3 *tp = netdev_priv(dev);
14129
14130         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14131                 netdev_warn(tp->dev,
14132                             "Board does not support EEE!\n");
14133                 return -EOPNOTSUPP;
14134         }
14135
14136         *edata = tp->eee;
14137         return 0;
14138 }
14139
14140 static const struct ethtool_ops tg3_ethtool_ops = {
14141         .get_drvinfo            = tg3_get_drvinfo,
14142         .get_regs_len           = tg3_get_regs_len,
14143         .get_regs               = tg3_get_regs,
14144         .get_wol                = tg3_get_wol,
14145         .set_wol                = tg3_set_wol,
14146         .get_msglevel           = tg3_get_msglevel,
14147         .set_msglevel           = tg3_set_msglevel,
14148         .nway_reset             = tg3_nway_reset,
14149         .get_link               = ethtool_op_get_link,
14150         .get_eeprom_len         = tg3_get_eeprom_len,
14151         .get_eeprom             = tg3_get_eeprom,
14152         .set_eeprom             = tg3_set_eeprom,
14153         .get_ringparam          = tg3_get_ringparam,
14154         .set_ringparam          = tg3_set_ringparam,
14155         .get_pauseparam         = tg3_get_pauseparam,
14156         .set_pauseparam         = tg3_set_pauseparam,
14157         .self_test              = tg3_self_test,
14158         .get_strings            = tg3_get_strings,
14159         .set_phys_id            = tg3_set_phys_id,
14160         .get_ethtool_stats      = tg3_get_ethtool_stats,
14161         .get_coalesce           = tg3_get_coalesce,
14162         .set_coalesce           = tg3_set_coalesce,
14163         .get_sset_count         = tg3_get_sset_count,
14164         .get_rxnfc              = tg3_get_rxnfc,
14165         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14166         .get_rxfh               = tg3_get_rxfh,
14167         .set_rxfh               = tg3_set_rxfh,
14168         .get_channels           = tg3_get_channels,
14169         .set_channels           = tg3_set_channels,
14170         .get_ts_info            = tg3_get_ts_info,
14171         .get_eee                = tg3_get_eee,
14172         .set_eee                = tg3_set_eee,
14173         .get_link_ksettings     = tg3_get_link_ksettings,
14174         .set_link_ksettings     = tg3_set_link_ksettings,
14175 };
14176
14177 static void tg3_get_stats64(struct net_device *dev,
14178                             struct rtnl_link_stats64 *stats)
14179 {
14180         struct tg3 *tp = netdev_priv(dev);
14181
14182         spin_lock_bh(&tp->lock);
14183         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14184                 *stats = tp->net_stats_prev;
14185                 spin_unlock_bh(&tp->lock);
14186                 return;
14187         }
14188
14189         tg3_get_nstats(tp, stats);
14190         spin_unlock_bh(&tp->lock);
14191 }
14192
14193 static void tg3_set_rx_mode(struct net_device *dev)
14194 {
14195         struct tg3 *tp = netdev_priv(dev);
14196
14197         if (!netif_running(dev))
14198                 return;
14199
14200         tg3_full_lock(tp, 0);
14201         __tg3_set_rx_mode(dev);
14202         tg3_full_unlock(tp);
14203 }
14204
14205 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14206                                int new_mtu)
14207 {
14208         dev->mtu = new_mtu;
14209
14210         if (new_mtu > ETH_DATA_LEN) {
14211                 if (tg3_flag(tp, 5780_CLASS)) {
14212                         netdev_update_features(dev);
14213                         tg3_flag_clear(tp, TSO_CAPABLE);
14214                 } else {
14215                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14216                 }
14217         } else {
14218                 if (tg3_flag(tp, 5780_CLASS)) {
14219                         tg3_flag_set(tp, TSO_CAPABLE);
14220                         netdev_update_features(dev);
14221                 }
14222                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14223         }
14224 }
14225
14226 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14227 {
14228         struct tg3 *tp = netdev_priv(dev);
14229         int err;
14230         bool reset_phy = false;
14231
14232         if (!netif_running(dev)) {
14233                 /* The new MTU is recorded now and takes effect
14234                  * when the device is next brought up.
14235                  */
14236                 tg3_set_mtu(dev, tp, new_mtu);
14237                 return 0;
14238         }
14239
14240         tg3_phy_stop(tp);
14241
14242         tg3_netif_stop(tp);
14243
14244         tg3_set_mtu(dev, tp, new_mtu);
14245
14246         tg3_full_lock(tp, 1);
14247
14248         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14249
14250         /* Reset the PHY, otherwise the read DMA engine will be left in a
14251          * mode that breaks all DMA requests up into 256-byte chunks.
14252          */
14253         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14254             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14255             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14256             tg3_asic_rev(tp) == ASIC_REV_5720)
14257                 reset_phy = true;
14258
14259         err = tg3_restart_hw(tp, reset_phy);
14260
14261         if (!err)
14262                 tg3_netif_start(tp);
14263
14264         tg3_full_unlock(tp);
14265
14266         if (!err)
14267                 tg3_phy_start(tp);
14268
14269         return err;
14270 }
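
/* tg3_change_mtu() above is reached through the net_device_ops table
 * below whenever the administrator changes the MTU, e.g. (interface
 * name is illustrative):
 *
 *     ip link set dev eth0 mtu 9000
 */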
14271
14272 static const struct net_device_ops tg3_netdev_ops = {
14273         .ndo_open               = tg3_open,
14274         .ndo_stop               = tg3_close,
14275         .ndo_start_xmit         = tg3_start_xmit,
14276         .ndo_get_stats64        = tg3_get_stats64,
14277         .ndo_validate_addr      = eth_validate_addr,
14278         .ndo_set_rx_mode        = tg3_set_rx_mode,
14279         .ndo_set_mac_address    = tg3_set_mac_addr,
14280         .ndo_do_ioctl           = tg3_ioctl,
14281         .ndo_tx_timeout         = tg3_tx_timeout,
14282         .ndo_change_mtu         = tg3_change_mtu,
14283         .ndo_fix_features       = tg3_fix_features,
14284         .ndo_set_features       = tg3_set_features,
14285 #ifdef CONFIG_NET_POLL_CONTROLLER
14286         .ndo_poll_controller    = tg3_poll_controller,
14287 #endif
14288 };
14289
14290 static void tg3_get_eeprom_size(struct tg3 *tp)
14291 {
14292         u32 cursize, val, magic;
14293
14294         tp->nvram_size = EEPROM_CHIP_SIZE;
14295
14296         if (tg3_nvram_read(tp, 0, &magic) != 0)
14297                 return;
14298
14299         if ((magic != TG3_EEPROM_MAGIC) &&
14300             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14301             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14302                 return;
14303
14304         /*
14305          * Size the chip by reading offsets at increasing powers of two.
14306          * When we encounter our validation signature, we know the addressing
14307          * has wrapped around, and thus have our chip size.
14308          */
14309         cursize = 0x10;
14310
14311         while (cursize < tp->nvram_size) {
14312                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14313                         return;
14314
14315                 if (val == magic)
14316                         break;
14317
14318                 cursize <<= 1;
14319         }
14320
14321         tp->nvram_size = cursize;
14322 }
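
/* A minimal standalone sketch of the wrap-around sizing loop above,
 * stripped of the hardware specifics.  read_word() is a hypothetical
 * accessor standing in for tg3_nvram_read(); on a part that mirrors its
 * contents, reading past the end wraps to offset 0 and yields the magic
 * value again.
 */
#if 0   /* example only, kept out of the build */
static u32 probe_nvram_size(u32 (*read_word)(u32 off), u32 magic, u32 max)
{
        u32 cursize = 0x10;

        while (cursize < max) {
                if (read_word(cursize) == magic)
                        break;          /* wrapped around: size found */
                cursize <<= 1;          /* probe the next power of two */
        }
        return cursize;
}
#endif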
14323
14324 static void tg3_get_nvram_size(struct tg3 *tp)
14325 {
14326         u32 val;
14327
14328         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14329                 return;
14330
14331         /* Selfboot format */
14332         if (val != TG3_EEPROM_MAGIC) {
14333                 tg3_get_eeprom_size(tp);
14334                 return;
14335         }
14336
14337         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14338                 if (val != 0) {
14339                         /* This is confusing.  We want to operate on the
14340                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14341                          * call will read from NVRAM and byteswap the data
14342                          * according to the byteswapping settings for all
14343                          * other register accesses.  This ensures the data we
14344                          * want will always reside in the lower 16-bits.
14345                          * However, the data in NVRAM is in LE format, which
14346                          * means the data from the NVRAM read will always be
14347                          * opposite the endianness of the CPU.  The 16-bit
14348                          * byteswap then brings the data to CPU endianness.
14349                          */
14350                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14351                         return;
14352                 }
14353         }
14354         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14355 }
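
/* Worked example of the swab16() logic described above: a size of 512
 * (in units of 1024 bytes) is stored in NVRAM as the little-endian
 * halfword 00 02.  Per the comment, the raw read delivers the halfword
 * opposite to CPU endianness, so swab16() restores 0x0200 == 512 and
 * nvram_size becomes 512 * 1024 bytes.
 */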
14356
14357 static void tg3_get_nvram_info(struct tg3 *tp)
14358 {
14359         u32 nvcfg1;
14360
14361         nvcfg1 = tr32(NVRAM_CFG1);
14362         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14363                 tg3_flag_set(tp, FLASH);
14364         } else {
14365                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14366                 tw32(NVRAM_CFG1, nvcfg1);
14367         }
14368
14369         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14370             tg3_flag(tp, 5780_CLASS)) {
14371                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14372                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14373                         tp->nvram_jedecnum = JEDEC_ATMEL;
14374                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14375                         tg3_flag_set(tp, NVRAM_BUFFERED);
14376                         break;
14377                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14378                         tp->nvram_jedecnum = JEDEC_ATMEL;
14379                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14380                         break;
14381                 case FLASH_VENDOR_ATMEL_EEPROM:
14382                         tp->nvram_jedecnum = JEDEC_ATMEL;
14383                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14384                         tg3_flag_set(tp, NVRAM_BUFFERED);
14385                         break;
14386                 case FLASH_VENDOR_ST:
14387                         tp->nvram_jedecnum = JEDEC_ST;
14388                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14389                         tg3_flag_set(tp, NVRAM_BUFFERED);
14390                         break;
14391                 case FLASH_VENDOR_SAIFUN:
14392                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14393                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14394                         break;
14395                 case FLASH_VENDOR_SST_SMALL:
14396                 case FLASH_VENDOR_SST_LARGE:
14397                         tp->nvram_jedecnum = JEDEC_SST;
14398                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14399                         break;
14400                 }
14401         } else {
14402                 tp->nvram_jedecnum = JEDEC_ATMEL;
14403                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14404                 tg3_flag_set(tp, NVRAM_BUFFERED);
14405         }
14406 }
14407
14408 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14409 {
14410         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14411         case FLASH_5752PAGE_SIZE_256:
14412                 tp->nvram_pagesize = 256;
14413                 break;
14414         case FLASH_5752PAGE_SIZE_512:
14415                 tp->nvram_pagesize = 512;
14416                 break;
14417         case FLASH_5752PAGE_SIZE_1K:
14418                 tp->nvram_pagesize = 1024;
14419                 break;
14420         case FLASH_5752PAGE_SIZE_2K:
14421                 tp->nvram_pagesize = 2048;
14422                 break;
14423         case FLASH_5752PAGE_SIZE_4K:
14424                 tp->nvram_pagesize = 4096;
14425                 break;
14426         case FLASH_5752PAGE_SIZE_264:
14427                 tp->nvram_pagesize = 264;
14428                 break;
14429         case FLASH_5752PAGE_SIZE_528:
14430                 tp->nvram_pagesize = 528;
14431                 break;
14432         }
14433 }
14434
14435 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14436 {
14437         u32 nvcfg1;
14438
14439         nvcfg1 = tr32(NVRAM_CFG1);
14440
14441         /* NVRAM protection for TPM */
14442         if (nvcfg1 & (1 << 27))
14443                 tg3_flag_set(tp, PROTECTED_NVRAM);
14444
14445         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14446         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14447         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14448                 tp->nvram_jedecnum = JEDEC_ATMEL;
14449                 tg3_flag_set(tp, NVRAM_BUFFERED);
14450                 break;
14451         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14452                 tp->nvram_jedecnum = JEDEC_ATMEL;
14453                 tg3_flag_set(tp, NVRAM_BUFFERED);
14454                 tg3_flag_set(tp, FLASH);
14455                 break;
14456         case FLASH_5752VENDOR_ST_M45PE10:
14457         case FLASH_5752VENDOR_ST_M45PE20:
14458         case FLASH_5752VENDOR_ST_M45PE40:
14459                 tp->nvram_jedecnum = JEDEC_ST;
14460                 tg3_flag_set(tp, NVRAM_BUFFERED);
14461                 tg3_flag_set(tp, FLASH);
14462                 break;
14463         }
14464
14465         if (tg3_flag(tp, FLASH)) {
14466                 tg3_nvram_get_pagesize(tp, nvcfg1);
14467         } else {
14468                 /* For EEPROM, set the pagesize to the maximum EEPROM size. */
14469                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14470
14471                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14472                 tw32(NVRAM_CFG1, nvcfg1);
14473         }
14474 }
14475
14476 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14477 {
14478         u32 nvcfg1, protect = 0;
14479
14480         nvcfg1 = tr32(NVRAM_CFG1);
14481
14482         /* NVRAM protection for TPM */
14483         if (nvcfg1 & (1 << 27)) {
14484                 tg3_flag_set(tp, PROTECTED_NVRAM);
14485                 protect = 1;
14486         }
14487
14488         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14489         switch (nvcfg1) {
14490         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14491         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14492         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14493         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14494                 tp->nvram_jedecnum = JEDEC_ATMEL;
14495                 tg3_flag_set(tp, NVRAM_BUFFERED);
14496                 tg3_flag_set(tp, FLASH);
14497                 tp->nvram_pagesize = 264;
14498                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14499                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14500                         tp->nvram_size = (protect ? 0x3e200 :
14501                                           TG3_NVRAM_SIZE_512KB);
14502                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14503                         tp->nvram_size = (protect ? 0x1f200 :
14504                                           TG3_NVRAM_SIZE_256KB);
14505                 else
14506                         tp->nvram_size = (protect ? 0x1f200 :
14507                                           TG3_NVRAM_SIZE_128KB);
14508                 break;
14509         case FLASH_5752VENDOR_ST_M45PE10:
14510         case FLASH_5752VENDOR_ST_M45PE20:
14511         case FLASH_5752VENDOR_ST_M45PE40:
14512                 tp->nvram_jedecnum = JEDEC_ST;
14513                 tg3_flag_set(tp, NVRAM_BUFFERED);
14514                 tg3_flag_set(tp, FLASH);
14515                 tp->nvram_pagesize = 256;
14516                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14517                         tp->nvram_size = (protect ?
14518                                           TG3_NVRAM_SIZE_64KB :
14519                                           TG3_NVRAM_SIZE_128KB);
14520                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14521                         tp->nvram_size = (protect ?
14522                                           TG3_NVRAM_SIZE_64KB :
14523                                           TG3_NVRAM_SIZE_256KB);
14524                 else
14525                         tp->nvram_size = (protect ?
14526                                           TG3_NVRAM_SIZE_128KB :
14527                                           TG3_NVRAM_SIZE_512KB);
14528                 break;
14529         }
14530 }
14531
14532 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14533 {
14534         u32 nvcfg1;
14535
14536         nvcfg1 = tr32(NVRAM_CFG1);
14537
14538         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14539         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14540         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14541         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14542         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14543                 tp->nvram_jedecnum = JEDEC_ATMEL;
14544                 tg3_flag_set(tp, NVRAM_BUFFERED);
14545                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14546
14547                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14548                 tw32(NVRAM_CFG1, nvcfg1);
14549                 break;
14550         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14551         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14552         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14553         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14554                 tp->nvram_jedecnum = JEDEC_ATMEL;
14555                 tg3_flag_set(tp, NVRAM_BUFFERED);
14556                 tg3_flag_set(tp, FLASH);
14557                 tp->nvram_pagesize = 264;
14558                 break;
14559         case FLASH_5752VENDOR_ST_M45PE10:
14560         case FLASH_5752VENDOR_ST_M45PE20:
14561         case FLASH_5752VENDOR_ST_M45PE40:
14562                 tp->nvram_jedecnum = JEDEC_ST;
14563                 tg3_flag_set(tp, NVRAM_BUFFERED);
14564                 tg3_flag_set(tp, FLASH);
14565                 tp->nvram_pagesize = 256;
14566                 break;
14567         }
14568 }
14569
14570 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14571 {
14572         u32 nvcfg1, protect = 0;
14573
14574         nvcfg1 = tr32(NVRAM_CFG1);
14575
14576         /* NVRAM protection for TPM */
14577         if (nvcfg1 & (1 << 27)) {
14578                 tg3_flag_set(tp, PROTECTED_NVRAM);
14579                 protect = 1;
14580         }
14581
14582         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14583         switch (nvcfg1) {
14584         case FLASH_5761VENDOR_ATMEL_ADB021D:
14585         case FLASH_5761VENDOR_ATMEL_ADB041D:
14586         case FLASH_5761VENDOR_ATMEL_ADB081D:
14587         case FLASH_5761VENDOR_ATMEL_ADB161D:
14588         case FLASH_5761VENDOR_ATMEL_MDB021D:
14589         case FLASH_5761VENDOR_ATMEL_MDB041D:
14590         case FLASH_5761VENDOR_ATMEL_MDB081D:
14591         case FLASH_5761VENDOR_ATMEL_MDB161D:
14592                 tp->nvram_jedecnum = JEDEC_ATMEL;
14593                 tg3_flag_set(tp, NVRAM_BUFFERED);
14594                 tg3_flag_set(tp, FLASH);
14595                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14596                 tp->nvram_pagesize = 256;
14597                 break;
14598         case FLASH_5761VENDOR_ST_A_M45PE20:
14599         case FLASH_5761VENDOR_ST_A_M45PE40:
14600         case FLASH_5761VENDOR_ST_A_M45PE80:
14601         case FLASH_5761VENDOR_ST_A_M45PE16:
14602         case FLASH_5761VENDOR_ST_M_M45PE20:
14603         case FLASH_5761VENDOR_ST_M_M45PE40:
14604         case FLASH_5761VENDOR_ST_M_M45PE80:
14605         case FLASH_5761VENDOR_ST_M_M45PE16:
14606                 tp->nvram_jedecnum = JEDEC_ST;
14607                 tg3_flag_set(tp, NVRAM_BUFFERED);
14608                 tg3_flag_set(tp, FLASH);
14609                 tp->nvram_pagesize = 256;
14610                 break;
14611         }
14612
14613         if (protect) {
14614                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14615         } else {
14616                 switch (nvcfg1) {
14617                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14618                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14619                 case FLASH_5761VENDOR_ST_A_M45PE16:
14620                 case FLASH_5761VENDOR_ST_M_M45PE16:
14621                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14622                         break;
14623                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14624                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14625                 case FLASH_5761VENDOR_ST_A_M45PE80:
14626                 case FLASH_5761VENDOR_ST_M_M45PE80:
14627                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14628                         break;
14629                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14630                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14631                 case FLASH_5761VENDOR_ST_A_M45PE40:
14632                 case FLASH_5761VENDOR_ST_M_M45PE40:
14633                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14634                         break;
14635                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14636                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14637                 case FLASH_5761VENDOR_ST_A_M45PE20:
14638                 case FLASH_5761VENDOR_ST_M_M45PE20:
14639                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14640                         break;
14641                 }
14642         }
14643 }
14644
14645 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14646 {
14647         tp->nvram_jedecnum = JEDEC_ATMEL;
14648         tg3_flag_set(tp, NVRAM_BUFFERED);
14649         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14650 }
14651
14652 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14653 {
14654         u32 nvcfg1;
14655
14656         nvcfg1 = tr32(NVRAM_CFG1);
14657
14658         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14659         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14660         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14661                 tp->nvram_jedecnum = JEDEC_ATMEL;
14662                 tg3_flag_set(tp, NVRAM_BUFFERED);
14663                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14664
14665                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14666                 tw32(NVRAM_CFG1, nvcfg1);
14667                 return;
14668         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14669         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14670         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14671         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14672         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14673         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14674         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14675                 tp->nvram_jedecnum = JEDEC_ATMEL;
14676                 tg3_flag_set(tp, NVRAM_BUFFERED);
14677                 tg3_flag_set(tp, FLASH);
14678
14679                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14680                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14681                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14682                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14683                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14684                         break;
14685                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14686                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14687                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14688                         break;
14689                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14690                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14691                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14692                         break;
14693                 }
14694                 break;
14695         case FLASH_5752VENDOR_ST_M45PE10:
14696         case FLASH_5752VENDOR_ST_M45PE20:
14697         case FLASH_5752VENDOR_ST_M45PE40:
14698                 tp->nvram_jedecnum = JEDEC_ST;
14699                 tg3_flag_set(tp, NVRAM_BUFFERED);
14700                 tg3_flag_set(tp, FLASH);
14701
14702                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14703                 case FLASH_5752VENDOR_ST_M45PE10:
14704                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14705                         break;
14706                 case FLASH_5752VENDOR_ST_M45PE20:
14707                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14708                         break;
14709                 case FLASH_5752VENDOR_ST_M45PE40:
14710                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14711                         break;
14712                 }
14713                 break;
14714         default:
14715                 tg3_flag_set(tp, NO_NVRAM);
14716                 return;
14717         }
14718
14719         tg3_nvram_get_pagesize(tp, nvcfg1);
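        /* The 264- and 528-byte page sizes correspond to Atmel
         * AT45DB-style DataFlash, whose non-power-of-two pages need the
         * NVRAM address translation; all other parts are linearly
         * addressed, hence NO_NVRAM_ADDR_TRANS.
         */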
14720         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14721                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14722 }
14723
14724
14725 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14726 {
14727         u32 nvcfg1;
14728
14729         nvcfg1 = tr32(NVRAM_CFG1);
14730
14731         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14732         case FLASH_5717VENDOR_ATMEL_EEPROM:
14733         case FLASH_5717VENDOR_MICRO_EEPROM:
14734                 tp->nvram_jedecnum = JEDEC_ATMEL;
14735                 tg3_flag_set(tp, NVRAM_BUFFERED);
14736                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14737
14738                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14739                 tw32(NVRAM_CFG1, nvcfg1);
14740                 return;
14741         case FLASH_5717VENDOR_ATMEL_MDB011D:
14742         case FLASH_5717VENDOR_ATMEL_ADB011B:
14743         case FLASH_5717VENDOR_ATMEL_ADB011D:
14744         case FLASH_5717VENDOR_ATMEL_MDB021D:
14745         case FLASH_5717VENDOR_ATMEL_ADB021B:
14746         case FLASH_5717VENDOR_ATMEL_ADB021D:
14747         case FLASH_5717VENDOR_ATMEL_45USPT:
14748                 tp->nvram_jedecnum = JEDEC_ATMEL;
14749                 tg3_flag_set(tp, NVRAM_BUFFERED);
14750                 tg3_flag_set(tp, FLASH);
14751
14752                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14753                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14754                         /* Detect size with tg3_nvram_get_size() */
14755                         break;
14756                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14757                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14758                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14759                         break;
14760                 default:
14761                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14762                         break;
14763                 }
14764                 break;
14765         case FLASH_5717VENDOR_ST_M_M25PE10:
14766         case FLASH_5717VENDOR_ST_A_M25PE10:
14767         case FLASH_5717VENDOR_ST_M_M45PE10:
14768         case FLASH_5717VENDOR_ST_A_M45PE10:
14769         case FLASH_5717VENDOR_ST_M_M25PE20:
14770         case FLASH_5717VENDOR_ST_A_M25PE20:
14771         case FLASH_5717VENDOR_ST_M_M45PE20:
14772         case FLASH_5717VENDOR_ST_A_M45PE20:
14773         case FLASH_5717VENDOR_ST_25USPT:
14774         case FLASH_5717VENDOR_ST_45USPT:
14775                 tp->nvram_jedecnum = JEDEC_ST;
14776                 tg3_flag_set(tp, NVRAM_BUFFERED);
14777                 tg3_flag_set(tp, FLASH);
14778
14779                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14780                 case FLASH_5717VENDOR_ST_M_M25PE20:
14781                 case FLASH_5717VENDOR_ST_M_M45PE20:
14782                         /* Detect size with tg3_nvram_get_size() */
14783                         break;
14784                 case FLASH_5717VENDOR_ST_A_M25PE20:
14785                 case FLASH_5717VENDOR_ST_A_M45PE20:
14786                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14787                         break;
14788                 default:
14789                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14790                         break;
14791                 }
14792                 break;
14793         default:
14794                 tg3_flag_set(tp, NO_NVRAM);
14795                 return;
14796         }
14797
14798         tg3_nvram_get_pagesize(tp, nvcfg1);
14799         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14800                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14801 }
14802
14803 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14804 {
14805         u32 nvcfg1, nvmpinstrp, nv_status;
14806
14807         nvcfg1 = tr32(NVRAM_CFG1);
14808         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14809
14810         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14811                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14812                         tg3_flag_set(tp, NO_NVRAM);
14813                         return;
14814                 }
14815
14816                 switch (nvmpinstrp) {
14817                 case FLASH_5762_MX25L_100:
14818                 case FLASH_5762_MX25L_200:
14819                 case FLASH_5762_MX25L_400:
14820                 case FLASH_5762_MX25L_800:
14821                 case FLASH_5762_MX25L_160_320:
14822                         tp->nvram_pagesize = 4096;
14823                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14824                         tg3_flag_set(tp, NVRAM_BUFFERED);
14825                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14826                         tg3_flag_set(tp, FLASH);
14827                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
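                        /* Precedence note: ">>" binds tighter than "&",
                         * so the device-ID field is shifted down first,
                         * then masked; the size is 2 to that power,
                         * scaled up by the AUTOSENSE_SIZE_IN_MB shift.
                         */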
14828                         tp->nvram_size =
14829                                 (1 << (nv_status >> AUTOSENSE_DEVID &
14830                                                 AUTOSENSE_DEVID_MASK)
14831                                         << AUTOSENSE_SIZE_IN_MB);
14832                         return;
14833
14834                 case FLASH_5762_EEPROM_HD:
14835                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14836                         break;
14837                 case FLASH_5762_EEPROM_LD:
14838                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14839                         break;
14840                 case FLASH_5720VENDOR_M_ST_M45PE20:
14841                         /* This pinstrap supports multiple sizes, so force it
14842                          * to read the actual size from location 0xf0.
14843                          */
14844                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14845                         break;
14846                 }
14847         }
14848
14849         switch (nvmpinstrp) {
14850         case FLASH_5720_EEPROM_HD:
14851         case FLASH_5720_EEPROM_LD:
14852                 tp->nvram_jedecnum = JEDEC_ATMEL;
14853                 tg3_flag_set(tp, NVRAM_BUFFERED);
14854
14855                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14856                 tw32(NVRAM_CFG1, nvcfg1);
14857                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14858                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14859                 else
14860                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14861                 return;
14862         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14863         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14864         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14865         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14866         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14867         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14868         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14869         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14870         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14871         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14872         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14873         case FLASH_5720VENDOR_ATMEL_45USPT:
14874                 tp->nvram_jedecnum = JEDEC_ATMEL;
14875                 tg3_flag_set(tp, NVRAM_BUFFERED);
14876                 tg3_flag_set(tp, FLASH);
14877
14878                 switch (nvmpinstrp) {
14879                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14880                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14881                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14882                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14883                         break;
14884                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14885                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14886                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14887                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14888                         break;
14889                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14890                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14891                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14892                         break;
14893                 default:
14894                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14895                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14896                         break;
14897                 }
14898                 break;
14899         case FLASH_5720VENDOR_M_ST_M25PE10:
14900         case FLASH_5720VENDOR_M_ST_M45PE10:
14901         case FLASH_5720VENDOR_A_ST_M25PE10:
14902         case FLASH_5720VENDOR_A_ST_M45PE10:
14903         case FLASH_5720VENDOR_M_ST_M25PE20:
14904         case FLASH_5720VENDOR_M_ST_M45PE20:
14905         case FLASH_5720VENDOR_A_ST_M25PE20:
14906         case FLASH_5720VENDOR_A_ST_M45PE20:
14907         case FLASH_5720VENDOR_M_ST_M25PE40:
14908         case FLASH_5720VENDOR_M_ST_M45PE40:
14909         case FLASH_5720VENDOR_A_ST_M25PE40:
14910         case FLASH_5720VENDOR_A_ST_M45PE40:
14911         case FLASH_5720VENDOR_M_ST_M25PE80:
14912         case FLASH_5720VENDOR_M_ST_M45PE80:
14913         case FLASH_5720VENDOR_A_ST_M25PE80:
14914         case FLASH_5720VENDOR_A_ST_M45PE80:
14915         case FLASH_5720VENDOR_ST_25USPT:
14916         case FLASH_5720VENDOR_ST_45USPT:
14917                 tp->nvram_jedecnum = JEDEC_ST;
14918                 tg3_flag_set(tp, NVRAM_BUFFERED);
14919                 tg3_flag_set(tp, FLASH);
14920
14921                 switch (nvmpinstrp) {
14922                 case FLASH_5720VENDOR_M_ST_M25PE20:
14923                 case FLASH_5720VENDOR_M_ST_M45PE20:
14924                 case FLASH_5720VENDOR_A_ST_M25PE20:
14925                 case FLASH_5720VENDOR_A_ST_M45PE20:
14926                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14927                         break;
14928                 case FLASH_5720VENDOR_M_ST_M25PE40:
14929                 case FLASH_5720VENDOR_M_ST_M45PE40:
14930                 case FLASH_5720VENDOR_A_ST_M25PE40:
14931                 case FLASH_5720VENDOR_A_ST_M45PE40:
14932                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14933                         break;
14934                 case FLASH_5720VENDOR_M_ST_M25PE80:
14935                 case FLASH_5720VENDOR_M_ST_M45PE80:
14936                 case FLASH_5720VENDOR_A_ST_M25PE80:
14937                 case FLASH_5720VENDOR_A_ST_M45PE80:
14938                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14939                         break;
14940                 default:
14941                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14942                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14943                         break;
14944                 }
14945                 break;
14946         default:
14947                 tg3_flag_set(tp, NO_NVRAM);
14948                 return;
14949         }
14950
14951         tg3_nvram_get_pagesize(tp, nvcfg1);
14952         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14953                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14954
14955         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14956                 u32 val;
14957
14958                 if (tg3_nvram_read(tp, 0, &val))
14959                         return;
14960
14961                 if (val != TG3_EEPROM_MAGIC &&
14962                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14963                         tg3_flag_set(tp, NO_NVRAM);
14964         }
14965 }
14966
14967 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14968 static void tg3_nvram_init(struct tg3 *tp)
14969 {
14970         if (tg3_flag(tp, IS_SSB_CORE)) {
14971                 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14972                 tg3_flag_clear(tp, NVRAM);
14973                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14974                 tg3_flag_set(tp, NO_NVRAM);
14975                 return;
14976         }
14977
14978         tw32_f(GRC_EEPROM_ADDR,
14979              (EEPROM_ADDR_FSM_RESET |
14980               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14981                EEPROM_ADDR_CLKPERD_SHIFT)));
14982
14983         msleep(1);
14984
14985         /* Enable serial EEPROM accesses. */
14986         tw32_f(GRC_LOCAL_CTRL,
14987              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14988         udelay(100);
14989
14990         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14991             tg3_asic_rev(tp) != ASIC_REV_5701) {
14992                 tg3_flag_set(tp, NVRAM);
14993
14994                 if (tg3_nvram_lock(tp)) {
14995                         netdev_warn(tp->dev,
14996                                     "Cannot get nvram lock, %s failed\n",
14997                                     __func__);
14998                         return;
14999                 }
15000                 tg3_enable_nvram_access(tp);
15001
15002                 tp->nvram_size = 0;
15003
15004                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15005                         tg3_get_5752_nvram_info(tp);
15006                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15007                         tg3_get_5755_nvram_info(tp);
15008                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15009                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
15010                          tg3_asic_rev(tp) == ASIC_REV_5785)
15011                         tg3_get_5787_nvram_info(tp);
15012                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15013                         tg3_get_5761_nvram_info(tp);
15014                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15015                         tg3_get_5906_nvram_info(tp);
15016                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15017                          tg3_flag(tp, 57765_CLASS))
15018                         tg3_get_57780_nvram_info(tp);
15019                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15020                          tg3_asic_rev(tp) == ASIC_REV_5719)
15021                         tg3_get_5717_nvram_info(tp);
15022                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15023                          tg3_asic_rev(tp) == ASIC_REV_5762)
15024                         tg3_get_5720_nvram_info(tp);
15025                 else
15026                         tg3_get_nvram_info(tp);
15027
15028                 if (tp->nvram_size == 0)
15029                         tg3_get_nvram_size(tp);
15030
15031                 tg3_disable_nvram_access(tp);
15032                 tg3_nvram_unlock(tp);
15033
15034         } else {
15035                 tg3_flag_clear(tp, NVRAM);
15036                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15037
15038                 tg3_get_eeprom_size(tp);
15039         }
15040 }
15041
15042 struct subsys_tbl_ent {
15043         u16 subsys_vendor, subsys_devid;
15044         u32 phy_id;
15045 };
15046
15047 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15048         /* Broadcom boards. */
15049         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15050           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15051         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15052           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15053         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15054           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15055         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15056           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15057         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15058           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15059         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15060           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15061         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15062           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15063         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15064           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15065         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15066           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15067         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15068           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15069         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15070           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15071
15072         /* 3com boards. */
15073         { TG3PCI_SUBVENDOR_ID_3COM,
15074           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15075         { TG3PCI_SUBVENDOR_ID_3COM,
15076           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15077         { TG3PCI_SUBVENDOR_ID_3COM,
15078           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15079         { TG3PCI_SUBVENDOR_ID_3COM,
15080           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15081         { TG3PCI_SUBVENDOR_ID_3COM,
15082           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15083
15084         /* DELL boards. */
15085         { TG3PCI_SUBVENDOR_ID_DELL,
15086           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15087         { TG3PCI_SUBVENDOR_ID_DELL,
15088           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15089         { TG3PCI_SUBVENDOR_ID_DELL,
15090           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15091         { TG3PCI_SUBVENDOR_ID_DELL,
15092           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15093
15094         /* Compaq boards. */
15095         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15096           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15097         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15098           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15099         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15100           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15101         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15102           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15103         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15104           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15105
15106         /* IBM boards. */
15107         { TG3PCI_SUBVENDOR_ID_IBM,
15108           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15109 };
15110
15111 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15112 {
15113         int i;
15114
15115         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15116                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15117                      tp->pdev->subsystem_vendor) &&
15118                     (subsys_id_to_phy_id[i].subsys_devid ==
15119                      tp->pdev->subsystem_device))
15120                         return &subsys_id_to_phy_id[i];
15121         }
15122         return NULL;
15123 }
15124
15125 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15126 {
15127         u32 val;
15128
15129         tp->phy_id = TG3_PHY_ID_INVALID;
15130         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15131
15132         /* Assume an onboard, WOL-capable device by default. */
15133         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15134         tg3_flag_set(tp, WOL_CAP);
15135
15136         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15137                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15138                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15139                         tg3_flag_set(tp, IS_NIC);
15140                 }
15141                 val = tr32(VCPU_CFGSHDW);
15142                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15143                         tg3_flag_set(tp, ASPM_WORKAROUND);
15144                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15145                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15146                         tg3_flag_set(tp, WOL_ENABLE);
15147                         device_set_wakeup_enable(&tp->pdev->dev, true);
15148                 }
15149                 goto done;
15150         }
15151
15152         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15153         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15154                 u32 nic_cfg, led_cfg;
15155                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15156                 u32 nic_phy_id, ver, eeprom_phy_id;
15157                 int eeprom_phy_serdes = 0;
15158
15159                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15160                 tp->nic_sram_data_cfg = nic_cfg;
15161
15162                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15163                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15164                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15165                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15166                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15167                     (ver > 0) && (ver < 0x100))
15168                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15169
15170                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15171                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15172
15173                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15174                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15175                     tg3_asic_rev(tp) == ASIC_REV_5720)
15176                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15177
15178                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15179                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15180                         eeprom_phy_serdes = 1;
15181
15182                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15183                 if (nic_phy_id != 0) {
15184                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15185                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15186
15187                         eeprom_phy_id  = (id1 >> 16) << 10;
15188                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15189                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15190                 } else
15191                         eeprom_phy_id = 0;
15192
15193                 tp->phy_id = eeprom_phy_id;
15194                 if (eeprom_phy_serdes) {
15195                         if (!tg3_flag(tp, 5705_PLUS))
15196                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15197                         else
15198                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15199                 }
15200
15201                 if (tg3_flag(tp, 5750_PLUS))
15202                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15203                                     SHASTA_EXT_LED_MODE_MASK);
15204                 else
15205                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15206
15207                 switch (led_cfg) {
15208                 default:
15209                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15210                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15211                         break;
15212
15213                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15214                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15215                         break;
15216
15217                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15218                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15219
15220                         /* Default to LED_CTRL_MODE_PHY_1 if 0 (MAC mode)
15221                          * is read back from some older 5700/5701 bootcode.
15222                          */
15223                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15224                             tg3_asic_rev(tp) == ASIC_REV_5701)
15225                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15226
15227                         break;
15228
15229                 case SHASTA_EXT_LED_SHARED:
15230                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15231                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15232                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15233                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15234                                                  LED_CTRL_MODE_PHY_2);
15235
15236                         if (tg3_flag(tp, 5717_PLUS) ||
15237                             tg3_asic_rev(tp) == ASIC_REV_5762)
15238                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15239                                                 LED_CTRL_BLINK_RATE_MASK;
15240
15241                         break;
15242
15243                 case SHASTA_EXT_LED_MAC:
15244                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15245                         break;
15246
15247                 case SHASTA_EXT_LED_COMBO:
15248                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15249                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15250                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15251                                                  LED_CTRL_MODE_PHY_2);
15252                         break;
15253
15254                 }
15255
15256                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15257                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15258                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15259                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15260
15261                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15262                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15263
15264                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15265                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15266                         if ((tp->pdev->subsystem_vendor ==
15267                              PCI_VENDOR_ID_ARIMA) &&
15268                             (tp->pdev->subsystem_device == 0x205a ||
15269                              tp->pdev->subsystem_device == 0x2063))
15270                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15271                 } else {
15272                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15273                         tg3_flag_set(tp, IS_NIC);
15274                 }
15275
15276                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15277                         tg3_flag_set(tp, ENABLE_ASF);
15278                         if (tg3_flag(tp, 5750_PLUS))
15279                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15280                 }
15281
15282                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15283                     tg3_flag(tp, 5750_PLUS))
15284                         tg3_flag_set(tp, ENABLE_APE);
15285
15286                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15287                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15288                         tg3_flag_clear(tp, WOL_CAP);
15289
15290                 if (tg3_flag(tp, WOL_CAP) &&
15291                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15292                         tg3_flag_set(tp, WOL_ENABLE);
15293                         device_set_wakeup_enable(&tp->pdev->dev, true);
15294                 }
15295
15296                 if (cfg2 & (1 << 17))
15297                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15298
15299                 /* SerDes signal pre-emphasis in register 0x590 is set
15300                  * by the bootcode if bit 18 is set. */
15301                 if (cfg2 & (1 << 18))
15302                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15303
15304                 if ((tg3_flag(tp, 57765_PLUS) ||
15305                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15306                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15307                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15308                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15309
15310                 if (tg3_flag(tp, PCI_EXPRESS)) {
15311                         u32 cfg3;
15312
15313                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15314                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15315                             !tg3_flag(tp, 57765_PLUS) &&
15316                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15317                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15318                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15319                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15320                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15321                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15322                 }
15323
15324                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15325                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15326                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15327                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15328                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15329                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15330
15331                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15332                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15333         }
15334 done:
15335         if (tg3_flag(tp, WOL_CAP))
15336                 device_set_wakeup_enable(&tp->pdev->dev,
15337                                          tg3_flag(tp, WOL_ENABLE));
15338         else
15339                 device_set_wakeup_capable(&tp->pdev->dev, false);
15340 }
15341
15342 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15343 {
15344         int i, err;
15345         u32 val2, off = offset * 8;
15346
15347         err = tg3_nvram_lock(tp);
15348         if (err)
15349                 return err;
15350
15351         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15352         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15353                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15354         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15355         udelay(10);
15356
15357         for (i = 0; i < 100; i++) {
15358                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15359                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15360                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15361                         break;
15362                 }
15363                 udelay(10);
15364         }
15365
15366         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15367
15368         tg3_nvram_unlock(tp);
15369         if (val2 & APE_OTP_STATUS_CMD_DONE)
15370                 return 0;
15371
15372         return -EBUSY;
15373 }
15374
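/* Kick off a command in the on-chip OTP controller and poll
 * OTP_STATUS for completion.  Returns 0 on success or -EBUSY if the
 * command does not finish within roughly 1 ms.
 */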
15375 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15376 {
15377         int i;
15378         u32 val;
15379
15380         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15381         tw32(OTP_CTRL, cmd);
15382
15383         /* Wait for up to 1 ms for command to execute. */
15384         for (i = 0; i < 100; i++) {
15385                 val = tr32(OTP_STATUS);
15386                 if (val & OTP_STATUS_CMD_DONE)
15387                         break;
15388                 udelay(10);
15389         }
15390
15391         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15392 }
15393
15394 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15395  * configuration is a 32-bit value that straddles the alignment boundary.
15396  * We do two 32-bit reads and then shift and merge the results.
15397  */
15398 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15399 {
15400         u32 bhalf_otp, thalf_otp;
15401
15402         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15403
15404         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15405                 return 0;
15406
15407         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15408
15409         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15410                 return 0;
15411
15412         thalf_otp = tr32(OTP_READ_DATA);
15413
15414         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15415
15416         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15417                 return 0;
15418
15419         bhalf_otp = tr32(OTP_READ_DATA);
15420
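        /* The 32-bit phycfg value straddles an OTP word boundary: its
         * upper half is the low 16 bits of the MAGIC1 word and its
         * lower half is the high 16 bits of the MAGIC2 word.
         */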
15421         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15422 }
15423
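/* Seed the link configuration for autonegotiation: advertise every
 * speed/duplex the PHY flags do not rule out, and leave the active
 * speed and duplex unknown until a link is negotiated.
 */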
15424 static void tg3_phy_init_link_config(struct tg3 *tp)
15425 {
15426         u32 adv = ADVERTISED_Autoneg;
15427
15428         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15429                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15430                         adv |= ADVERTISED_1000baseT_Half;
15431                 adv |= ADVERTISED_1000baseT_Full;
15432         }
15433
15434         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15435                 adv |= ADVERTISED_100baseT_Half |
15436                        ADVERTISED_100baseT_Full |
15437                        ADVERTISED_10baseT_Half |
15438                        ADVERTISED_10baseT_Full |
15439                        ADVERTISED_TP;
15440         else
15441                 adv |= ADVERTISED_FIBRE;
15442
15443         tp->link_config.advertising = adv;
15444         tp->link_config.speed = SPEED_UNKNOWN;
15445         tp->link_config.duplex = DUPLEX_UNKNOWN;
15446         tp->link_config.autoneg = AUTONEG_ENABLE;
15447         tp->link_config.active_speed = SPEED_UNKNOWN;
15448         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15449
15450         tp->old_link = -1;
15451 }
15452
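/* Identify the PHY and set up PHY-related state: the APE PHY lock,
 * flow control defaults, EEE capability and the initial link
 * configuration.  The PHY ID is read over MDIO unless ASF/APE
 * firmware owns the PHY, with eeprom and subsystem-table fallbacks.
 */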
15453 static int tg3_phy_probe(struct tg3 *tp)
15454 {
15455         u32 hw_phy_id_1, hw_phy_id_2;
15456         u32 hw_phy_id, hw_phy_id_masked;
15457         int err;
15458
15459         /* flow control autonegotiation is default behavior */
15460         tg3_flag_set(tp, PAUSE_AUTONEG);
15461         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15462
15463         if (tg3_flag(tp, ENABLE_APE)) {
15464                 switch (tp->pci_fn) {
15465                 case 0:
15466                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15467                         break;
15468                 case 1:
15469                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15470                         break;
15471                 case 2:
15472                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15473                         break;
15474                 case 3:
15475                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15476                         break;
15477                 }
15478         }
15479
15480         if (!tg3_flag(tp, ENABLE_ASF) &&
15481             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15482             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15483                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15484                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15485
15486         if (tg3_flag(tp, USE_PHYLIB))
15487                 return tg3_phy_init(tp);
15488
15489         /* Reading the PHY ID register can conflict with ASF
15490          * firmware access to the PHY hardware.
15491          */
15492         err = 0;
15493         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15494                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15495         } else {
15496                 /* Now read the physical PHY_ID from the chip and verify
15497                  * that it is sane.  If it doesn't look good, we fall back
15498                  * to the PHY ID recorded in the eeprom area and, failing
15499                  * that, to the hard-coded subsys device table.
15500                  */
15501                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15502                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15503
15504                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15505                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15506                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15507
15508                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15509         }
15510
15511         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15512                 tp->phy_id = hw_phy_id;
15513                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15514                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15515                 else
15516                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15517         } else {
15518                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15519                         /* Do nothing, phy ID already set up in
15520                          * tg3_get_eeprom_hw_cfg().
15521                          */
15522                 } else {
15523                         struct subsys_tbl_ent *p;
15524
15525                         /* No eeprom signature?  Try the hardcoded
15526                          * subsys device table.
15527                          */
15528                         p = tg3_lookup_by_subsys(tp);
15529                         if (p) {
15530                                 tp->phy_id = p->phy_id;
15531                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15532                                 /* So far we have seen the IDs 0xbc050cd0,
15533                                  * 0xbc050f80 and 0xbc050c30 on devices
15534                                  * connected to a BCM4785, and there are
15535                                  * probably more. For now, just assume that
15536                                  * the PHY is supported whenever it is
15537                                  * connected to an SSB core.
15538                                  */
15539                                 return -ENODEV;
15540                         }
15541
15542                         if (!tp->phy_id ||
15543                             tp->phy_id == TG3_PHY_ID_BCM8002)
15544                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15545                 }
15546         }
15547
15548         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15549             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15550              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15551              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15552              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15553              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15554               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15555              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15556               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15557                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15558
15559                 tp->eee.supported = SUPPORTED_100baseT_Full |
15560                                     SUPPORTED_1000baseT_Full;
15561                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15562                                      ADVERTISED_1000baseT_Full;
15563                 tp->eee.eee_enabled = 1;
15564                 tp->eee.tx_lpi_enabled = 1;
15565                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15566         }
15567
15568         tg3_phy_init_link_config(tp);
15569
15570         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15571             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15572             !tg3_flag(tp, ENABLE_APE) &&
15573             !tg3_flag(tp, ENABLE_ASF)) {
15574                 u32 bmsr, dummy;
15575
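                /* The BMSR link status bit is latched; read the register
                 * twice so the second read reflects the current state.
                 */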
15576                 tg3_readphy(tp, MII_BMSR, &bmsr);
15577                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15578                     (bmsr & BMSR_LSTATUS))
15579                         goto skip_phy_reset;
15580
15581                 err = tg3_phy_reset(tp);
15582                 if (err)
15583                         return err;
15584
15585                 tg3_phy_set_wirespeed(tp);
15586
15587                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15588                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15589                                             tp->link_config.flowctrl);
15590
15591                         tg3_writephy(tp, MII_BMCR,
15592                                      BMCR_ANENABLE | BMCR_ANRESTART);
15593                 }
15594         }
15595
15596 skip_phy_reset:
15597         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15598                 err = tg3_init_5401phy_dsp(tp);
15599                 if (err)
15600                         return err;
15601
15602                 err = tg3_init_5401phy_dsp(tp);
15603         }
15604
15605         return err;
15606 }
15607
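/* Extract the board part number from the read-only PCI VPD section.
 * On boards whose VPD manufacturer ID is "1028" (Dell), the VENDOR0
 * keyword is also copied into tp->fw_ver.  Without usable VPD, fall
 * back to a part number derived from the PCI device ID.
 */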
15608 static void tg3_read_vpd(struct tg3 *tp)
15609 {
15610         u8 *vpd_data;
15611         unsigned int block_end, rosize, len;
15612         u32 vpdlen;
15613         int j, i = 0;
15614
15615         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15616         if (!vpd_data)
15617                 goto out_no_vpd;
15618
15619         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15620         if (i < 0)
15621                 goto out_not_found;
15622
15623         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15624         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15625         i += PCI_VPD_LRDT_TAG_SIZE;
15626
15627         if (block_end > vpdlen)
15628                 goto out_not_found;
15629
15630         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15631                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15632         if (j > 0) {
15633                 len = pci_vpd_info_field_size(&vpd_data[j]);
15634
15635                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15636                 if (j + len > block_end || len != 4 ||
15637                     memcmp(&vpd_data[j], "1028", 4))
15638                         goto partno;
15639
15640                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15641                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15642                 if (j < 0)
15643                         goto partno;
15644
15645                 len = pci_vpd_info_field_size(&vpd_data[j]);
15646
15647                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15648                 if (j + len > block_end)
15649                         goto partno;
15650
15651                 if (len >= sizeof(tp->fw_ver))
15652                         len = sizeof(tp->fw_ver) - 1;
15653                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15654                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15655                          &vpd_data[j]);
15656         }
15657
15658 partno:
15659         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15660                                       PCI_VPD_RO_KEYWORD_PARTNO);
15661         if (i < 0)
15662                 goto out_not_found;
15663
15664         len = pci_vpd_info_field_size(&vpd_data[i]);
15665
15666         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15667         if (len > TG3_BPN_SIZE ||
15668             (len + i) > vpdlen)
15669                 goto out_not_found;
15670
15671         memcpy(tp->board_part_number, &vpd_data[i], len);
15672
15673 out_not_found:
15674         kfree(vpd_data);
15675         if (tp->board_part_number[0])
15676                 return;
15677
15678 out_no_vpd:
15679         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15680                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15681                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15682                         strcpy(tp->board_part_number, "BCM5717");
15683                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15684                         strcpy(tp->board_part_number, "BCM5718");
15685                 else
15686                         goto nomatch;
15687         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15688                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15689                         strcpy(tp->board_part_number, "BCM57780");
15690                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15691                         strcpy(tp->board_part_number, "BCM57760");
15692                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15693                         strcpy(tp->board_part_number, "BCM57790");
15694                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15695                         strcpy(tp->board_part_number, "BCM57788");
15696                 else
15697                         goto nomatch;
15698         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15699                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15700                         strcpy(tp->board_part_number, "BCM57761");
15701                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15702                         strcpy(tp->board_part_number, "BCM57765");
15703                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15704                         strcpy(tp->board_part_number, "BCM57781");
15705                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15706                         strcpy(tp->board_part_number, "BCM57785");
15707                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15708                         strcpy(tp->board_part_number, "BCM57791");
15709                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15710                         strcpy(tp->board_part_number, "BCM57795");
15711                 else
15712                         goto nomatch;
15713         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15714                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15715                         strcpy(tp->board_part_number, "BCM57762");
15716                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15717                         strcpy(tp->board_part_number, "BCM57766");
15718                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15719                         strcpy(tp->board_part_number, "BCM57782");
15720                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15721                         strcpy(tp->board_part_number, "BCM57786");
15722                 else
15723                         goto nomatch;
15724         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15725                 strcpy(tp->board_part_number, "BCM95906");
15726         } else {
15727 nomatch:
15728                 strcpy(tp->board_part_number, "none");
15729         }
15730 }
15731
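/* A valid firmware image header has 0x0c000000 in the top six bits
 * of its first word and a zero second word.
 */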
15732 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15733 {
15734         u32 val;
15735
15736         if (tg3_nvram_read(tp, offset, &val) ||
15737             (val & 0xfc000000) != 0x0c000000 ||
15738             tg3_nvram_read(tp, offset + 4, &val) ||
15739             val != 0)
15740                 return 0;
15741
15742         return 1;
15743 }
15744
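/* Append the bootcode version to tp->fw_ver.  Newer images embed a
 * 16-byte ASCII version string; older ones only provide a packed
 * major/minor word in the NVRAM directory.
 */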
15745 static void tg3_read_bc_ver(struct tg3 *tp)
15746 {
15747         u32 val, offset, start, ver_offset;
15748         int i, dst_off;
15749         bool newver = false;
15750
15751         if (tg3_nvram_read(tp, 0xc, &offset) ||
15752             tg3_nvram_read(tp, 0x4, &start))
15753                 return;
15754
15755         offset = tg3_nvram_logical_addr(tp, offset);
15756
15757         if (tg3_nvram_read(tp, offset, &val))
15758                 return;
15759
15760         if ((val & 0xfc000000) == 0x0c000000) {
15761                 if (tg3_nvram_read(tp, offset + 4, &val))
15762                         return;
15763
15764                 if (val == 0)
15765                         newver = true;
15766         }
15767
15768         dst_off = strlen(tp->fw_ver);
15769
15770         if (newver) {
15771                 if (TG3_VER_SIZE - dst_off < 16 ||
15772                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15773                         return;
15774
15775                 offset = offset + ver_offset - start;
15776                 for (i = 0; i < 16; i += 4) {
15777                         __be32 v;
15778                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15779                                 return;
15780
15781                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15782                 }
15783         } else {
15784                 u32 major, minor;
15785
15786                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15787                         return;
15788
15789                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15790                         TG3_NVM_BCVER_MAJSFT;
15791                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15792                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15793                          "v%d.%02d", major, minor);
15794         }
15795 }
15796
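/* Format the hardware selfboot version from NVRAM config word 1. */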
15797 static void tg3_read_hwsb_ver(struct tg3 *tp)
15798 {
15799         u32 val, major, minor;
15800
15801         /* Use native endian representation */
15802         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15803                 return;
15804
15805         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15806                 TG3_NVM_HWSB_CFG1_MAJSFT;
15807         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15808                 TG3_NVM_HWSB_CFG1_MINSFT;
15809
15810         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15811 }
15812
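/* Append the selfboot format-1 version.  The version word's offset
 * varies with the selfboot revision; a nonzero build number is
 * appended as a letter suffix.
 */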
15813 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15814 {
15815         u32 offset, major, minor, build;
15816
15817         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15818
15819         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15820                 return;
15821
15822         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15823         case TG3_EEPROM_SB_REVISION_0:
15824                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15825                 break;
15826         case TG3_EEPROM_SB_REVISION_2:
15827                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15828                 break;
15829         case TG3_EEPROM_SB_REVISION_3:
15830                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15831                 break;
15832         case TG3_EEPROM_SB_REVISION_4:
15833                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15834                 break;
15835         case TG3_EEPROM_SB_REVISION_5:
15836                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15837                 break;
15838         case TG3_EEPROM_SB_REVISION_6:
15839                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15840                 break;
15841         default:
15842                 return;
15843         }
15844
15845         if (tg3_nvram_read(tp, offset, &val))
15846                 return;
15847
15848         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15849                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15850         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15851                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15852         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15853
15854         if (minor > 99 || build > 26)
15855                 return;
15856
15857         offset = strlen(tp->fw_ver);
15858         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15859                  " v%d.%02d", major, minor);
15860
15861         if (build > 0) {
15862                 offset = strlen(tp->fw_ver);
15863                 if (offset < TG3_VER_SIZE - 1)
15864                         tp->fw_ver[offset] = 'a' + build - 1;
15865         }
15866 }
15867
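/* Locate the ASF initialization image through the NVRAM directory
 * and append its version string to tp->fw_ver.
 */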
15868 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15869 {
15870         u32 val, offset, start;
15871         int i, vlen;
15872
15873         for (offset = TG3_NVM_DIR_START;
15874              offset < TG3_NVM_DIR_END;
15875              offset += TG3_NVM_DIRENT_SIZE) {
15876                 if (tg3_nvram_read(tp, offset, &val))
15877                         return;
15878
15879                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15880                         break;
15881         }
15882
15883         if (offset == TG3_NVM_DIR_END)
15884                 return;
15885
15886         if (!tg3_flag(tp, 5705_PLUS))
15887                 start = 0x08000000;
15888         else if (tg3_nvram_read(tp, offset - 4, &start))
15889                 return;
15890
15891         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15892             !tg3_fw_img_is_valid(tp, offset) ||
15893             tg3_nvram_read(tp, offset + 8, &val))
15894                 return;
15895
15896         offset += val - start;
15897
15898         vlen = strlen(tp->fw_ver);
15899
15900         tp->fw_ver[vlen++] = ',';
15901         tp->fw_ver[vlen++] = ' ';
15902
15903         for (i = 0; i < 4; i++) {
15904                 __be32 v;
15905                 if (tg3_nvram_read_be32(tp, offset, &v))
15906                         return;
15907
15908                 offset += sizeof(v);
15909
15910                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15911                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15912                         break;
15913                 }
15914
15915                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15916                 vlen += sizeof(v);
15917         }
15918 }
15919
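/* If the APE firmware is up and advertises the NCSI feature,
 * remember that in the flags.
 */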
15920 static void tg3_probe_ncsi(struct tg3 *tp)
15921 {
15922         u32 apedata;
15923
15924         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15925         if (apedata != APE_SEG_SIG_MAGIC)
15926                 return;
15927
15928         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15929         if (!(apedata & APE_FW_STATUS_READY))
15930                 return;
15931
15932         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15933                 tg3_flag_set(tp, APE_HAS_NCSI);
15934 }
15935
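/* Append the APE firmware type (NCSI, SMASH or DASH) and version. */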
15936 static void tg3_read_dash_ver(struct tg3 *tp)
15937 {
15938         int vlen;
15939         u32 apedata;
15940         char *fwtype;
15941
15942         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15943
15944         if (tg3_flag(tp, APE_HAS_NCSI))
15945                 fwtype = "NCSI";
15946         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15947                 fwtype = "SMASH";
15948         else
15949                 fwtype = "DASH";
15950
15951         vlen = strlen(tp->fw_ver);
15952
15953         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15954                  fwtype,
15955                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15956                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15957                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15958                  (apedata & APE_FW_VERSION_BLDMSK));
15959 }
15960
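/* On the 5762 the firmware version may be stored in OTP rather than
 * NVRAM; if a valid MAGIC0 block is present, append the version
 * found there.
 */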
15961 static void tg3_read_otp_ver(struct tg3 *tp)
15962 {
15963         u32 val, val2;
15964
15965         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15966                 return;
15967
15968         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15969             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15970             TG3_OTP_MAGIC0_VALID(val)) {
15971                 u64 val64 = (u64) val << 32 | val2;
15972                 u32 ver = 0;
15973                 int i, vlen;
15974
15975                 for (i = 0; i < 7; i++) {
15976                         if ((val64 & 0xff) == 0)
15977                                 break;
15978                         ver = val64 & 0xff;
15979                         val64 >>= 8;
15980                 }
15981                 vlen = strlen(tp->fw_ver);
15982                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15983         }
15984 }
15985
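/* Build the composite firmware version string: any VPD-supplied
 * version first, then the bootcode/selfboot version chosen by the
 * NVRAM magic, then the management firmware version when ASF is
 * enabled.
 */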
15986 static void tg3_read_fw_ver(struct tg3 *tp)
15987 {
15988         u32 val;
15989         bool vpd_vers = false;
15990
15991         if (tp->fw_ver[0] != 0)
15992                 vpd_vers = true;
15993
15994         if (tg3_flag(tp, NO_NVRAM)) {
15995                 strcat(tp->fw_ver, "sb");
15996                 tg3_read_otp_ver(tp);
15997                 return;
15998         }
15999
16000         if (tg3_nvram_read(tp, 0, &val))
16001                 return;
16002
16003         if (val == TG3_EEPROM_MAGIC)
16004                 tg3_read_bc_ver(tp);
16005         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16006                 tg3_read_sb_ver(tp, val);
16007         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16008                 tg3_read_hwsb_ver(tp);
16009
16010         if (tg3_flag(tp, ENABLE_ASF)) {
16011                 if (tg3_flag(tp, ENABLE_APE)) {
16012                         tg3_probe_ncsi(tp);
16013                         if (!vpd_vers)
16014                                 tg3_read_dash_ver(tp);
16015                 } else if (!vpd_vers) {
16016                         tg3_read_mgmtfw_ver(tp);
16017                 }
16018         }
16019
16020         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16021 }
16022
16023 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16024 {
16025         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16026                 return TG3_RX_RET_MAX_SIZE_5717;
16027         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16028                 return TG3_RX_RET_MAX_SIZE_5700;
16029         else
16030                 return TG3_RX_RET_MAX_SIZE_5705;
16031 }
16032
16033 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16034         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16035         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16036         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16037         { },
16038 };
16039
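/* Find the other function of a dual-port device by scanning the
 * remaining functions in the same PCI slot.
 */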
16040 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16041 {
16042         struct pci_dev *peer;
16043         unsigned int func, devnr = tp->pdev->devfn & ~7;
16044
16045         for (func = 0; func < 8; func++) {
16046                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16047                 if (peer && peer != tp->pdev)
16048                         break;
16049                 pci_dev_put(peer);
16050         }
16051         /* The 5704 can be configured in single-port mode; set peer
16052          * to tp->pdev in that case.
16053          */
16054         if (!peer) {
16055                 peer = tp->pdev;
16056                 return peer;
16057         }
16058
16059         /*
16060          * We don't need to keep the refcount elevated; there's no way
16061          * to remove one half of this device without removing the other.
16062          */
16063         pci_dev_put(peer);
16064
16065         return peer;
16066 }
16067
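/* Determine the chip revision ID, consulting the product ID
 * registers on devices that use the alternate ASIC REV location,
 * then derive the chip-family flags (5717_PLUS, 5755_PLUS, ...).
 */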
16068 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16069 {
16070         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16071         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16072                 u32 reg;
16073
16074                 /* All devices that use the alternate
16075                  * ASIC REV location have a CPMU.
16076                  */
16077                 tg3_flag_set(tp, CPMU_PRESENT);
16078
16079                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16080                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16081                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16082                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16083                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16084                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16085                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16086                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16087                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16088                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16089                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16090                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16091                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16092                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16093                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16094                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16095                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16096                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16097                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16098                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16099                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16100                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16101                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16102                 else
16103                         reg = TG3PCI_PRODID_ASICREV;
16104
16105                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16106         }
16107
16108         /* Wrong chip ID in 5752 A0. This code can be removed later
16109          * as A0 is not in production.
16110          */
16111         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16112                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16113
16114         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16115                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16116
16117         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16118             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16119             tg3_asic_rev(tp) == ASIC_REV_5720)
16120                 tg3_flag_set(tp, 5717_PLUS);
16121
16122         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16123             tg3_asic_rev(tp) == ASIC_REV_57766)
16124                 tg3_flag_set(tp, 57765_CLASS);
16125
16126         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16127              tg3_asic_rev(tp) == ASIC_REV_5762)
16128                 tg3_flag_set(tp, 57765_PLUS);
16129
16130         /* Intentionally exclude ASIC_REV_5906 */
16131         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16132             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16133             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16134             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16135             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16136             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16137             tg3_flag(tp, 57765_PLUS))
16138                 tg3_flag_set(tp, 5755_PLUS);
16139
16140         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16141             tg3_asic_rev(tp) == ASIC_REV_5714)
16142                 tg3_flag_set(tp, 5780_CLASS);
16143
16144         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16145             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16146             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16147             tg3_flag(tp, 5755_PLUS) ||
16148             tg3_flag(tp, 5780_CLASS))
16149                 tg3_flag_set(tp, 5750_PLUS);
16150
16151         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16152             tg3_flag(tp, 5750_PLUS))
16153                 tg3_flag_set(tp, 5705_PLUS);
16154 }
16155
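/* Report whether this device supports 10/100 only, judging by the
 * board ID straps, a FET PHY, or the driver_data flags in the PCI
 * match entry.
 */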
16156 static bool tg3_10_100_only_device(struct tg3 *tp,
16157                                    const struct pci_device_id *ent)
16158 {
16159         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16160
16161         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16162              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16163             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16164                 return true;
16165
16166         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16167                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16168                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16169                                 return true;
16170                 } else {
16171                         return true;
16172                 }
16173         }
16174
16175         return false;
16176 }
16177
16178 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16179 {
16180         u32 misc_ctrl_reg;
16181         u32 pci_state_reg, grc_misc_cfg;
16182         u32 val;
16183         u16 pci_cmd;
16184         int err;
16185
16186         /* Force memory write invalidate off.  If we leave it on,
16187          * then on 5700_BX chips we have to enable a workaround.
16188          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16189          * to match the cacheline size.  The Broadcom driver has this
16190          * workaround but turns MWI off all the time, so it never
16191          * uses it.  This seems to suggest the workaround is insufficient.
16192          */
16193         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16194         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16195         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16196
16197         /* Important! -- Make sure register accesses are byteswapped
16198          * correctly.  Also, for those chips that require it, make
16199          * sure that indirect register accesses are enabled before
16200          * the first operation.
16201          */
16202         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16203                               &misc_ctrl_reg);
16204         tp->misc_host_ctrl |= (misc_ctrl_reg &
16205                                MISC_HOST_CTRL_CHIPREV);
16206         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16207                                tp->misc_host_ctrl);
16208
16209         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16210
16211         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16212          * we need to disable memory and use config. cycles
16213          * only to access all registers. The 5702/03 chips
16214          * can mistakenly decode the special cycles from the
16215          * ICH chipsets as memory write cycles, causing corruption
16216          * of register and memory space. Only certain ICH bridges
16217          * will drive special cycles with non-zero data during the
16218          * address phase which can fall within the 5703's address
16219          * range. This is not an ICH bug as the PCI spec allows
16220          * non-zero address during special cycles. However, only
16221          * these ICH bridges are known to drive non-zero addresses
16222          * during special cycles.
16223          *
16224          * Since special cycles do not cross PCI bridges, we only
16225          * enable this workaround if the 5703 is on the secondary
16226          * bus of these ICH bridges.
16227          */
16228         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16229             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16230                 static struct tg3_dev_id {
16231                         u32     vendor;
16232                         u32     device;
16233                         u32     rev;
16234                 } ich_chipsets[] = {
16235                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16236                           PCI_ANY_ID },
16237                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16238                           PCI_ANY_ID },
16239                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16240                           0xa },
16241                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16242                           PCI_ANY_ID },
16243                         { },
16244                 };
16245                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16246                 struct pci_dev *bridge = NULL;
16247
16248                 while (pci_id->vendor != 0) {
16249                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16250                                                 bridge);
16251                         if (!bridge) {
16252                                 pci_id++;
16253                                 continue;
16254                         }
16255                         if (pci_id->rev != PCI_ANY_ID) {
16256                                 if (bridge->revision > pci_id->rev)
16257                                         continue;
16258                         }
16259                         if (bridge->subordinate &&
16260                             (bridge->subordinate->number ==
16261                              tp->pdev->bus->number)) {
16262                                 tg3_flag_set(tp, ICH_WORKAROUND);
16263                                 pci_dev_put(bridge);
16264                                 break;
16265                         }
16266                 }
16267         }
16268
16269         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16270                 static struct tg3_dev_id {
16271                         u32     vendor;
16272                         u32     device;
16273                 } bridge_chipsets[] = {
16274                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16275                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16276                         { },
16277                 };
16278                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16279                 struct pci_dev *bridge = NULL;
16280
16281                 while (pci_id->vendor != 0) {
16282                         bridge = pci_get_device(pci_id->vendor,
16283                                                 pci_id->device,
16284                                                 bridge);
16285                         if (!bridge) {
16286                                 pci_id++;
16287                                 continue;
16288                         }
16289                         if (bridge->subordinate &&
16290                             (bridge->subordinate->number <=
16291                              tp->pdev->bus->number) &&
16292                             (bridge->subordinate->busn_res.end >=
16293                              tp->pdev->bus->number)) {
16294                                 tg3_flag_set(tp, 5701_DMA_BUG);
16295                                 pci_dev_put(bridge);
16296                                 break;
16297                         }
16298                 }
16299         }
16300
16301         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16302          * DMA addresses > 40-bit. This bridge may have other additional
16303          * 57xx devices behind it in some 4-port NIC designs for example.
16304          * Any tg3 device found behind the bridge will also need the 40-bit
16305          * DMA workaround.
16306          */
16307         if (tg3_flag(tp, 5780_CLASS)) {
16308                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16309                 tp->msi_cap = tp->pdev->msi_cap;
16310         } else {
16311                 struct pci_dev *bridge = NULL;
16312
16313                 do {
16314                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16315                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16316                                                 bridge);
16317                         if (bridge && bridge->subordinate &&
16318                             (bridge->subordinate->number <=
16319                              tp->pdev->bus->number) &&
16320                             (bridge->subordinate->busn_res.end >=
16321                              tp->pdev->bus->number)) {
16322                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16323                                 pci_dev_put(bridge);
16324                                 break;
16325                         }
16326                 } while (bridge);
16327         }
16328
16329         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16330             tg3_asic_rev(tp) == ASIC_REV_5714)
16331                 tp->pdev_peer = tg3_find_peer(tp);
16332
16333         /* Determine TSO capabilities */
16334         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16335                 ; /* Do nothing. HW bug. */
16336         else if (tg3_flag(tp, 57765_PLUS))
16337                 tg3_flag_set(tp, HW_TSO_3);
16338         else if (tg3_flag(tp, 5755_PLUS) ||
16339                  tg3_asic_rev(tp) == ASIC_REV_5906)
16340                 tg3_flag_set(tp, HW_TSO_2);
16341         else if (tg3_flag(tp, 5750_PLUS)) {
16342                 tg3_flag_set(tp, HW_TSO_1);
16343                 tg3_flag_set(tp, TSO_BUG);
16344                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16345                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16346                         tg3_flag_clear(tp, TSO_BUG);
16347         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16348                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16349                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16350                 tg3_flag_set(tp, FW_TSO);
16351                 tg3_flag_set(tp, TSO_BUG);
16352                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16353                         tp->fw_needed = FIRMWARE_TG3TSO5;
16354                 else
16355                         tp->fw_needed = FIRMWARE_TG3TSO;
16356         }
16357
16358         /* Selectively allow TSO based on operating conditions */
16359         if (tg3_flag(tp, HW_TSO_1) ||
16360             tg3_flag(tp, HW_TSO_2) ||
16361             tg3_flag(tp, HW_TSO_3) ||
16362             tg3_flag(tp, FW_TSO)) {
16363                 /* For firmware TSO, assume ASF is disabled.
16364                  * We'll disable TSO later if we discover ASF
16365                  * is enabled in tg3_get_eeprom_hw_cfg().
16366                  */
16367                 tg3_flag_set(tp, TSO_CAPABLE);
16368         } else {
16369                 tg3_flag_clear(tp, TSO_CAPABLE);
16370                 tg3_flag_clear(tp, TSO_BUG);
16371                 tp->fw_needed = NULL;
16372         }
16373
16374         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16375                 tp->fw_needed = FIRMWARE_TG3;
16376
16377         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16378                 tp->fw_needed = FIRMWARE_TG357766;
16379
16380         tp->irq_max = 1;
16381
16382         if (tg3_flag(tp, 5750_PLUS)) {
16383                 tg3_flag_set(tp, SUPPORT_MSI);
16384                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16385                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16386                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16387                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16388                      tp->pdev_peer == tp->pdev))
16389                         tg3_flag_clear(tp, SUPPORT_MSI);
16390
16391                 if (tg3_flag(tp, 5755_PLUS) ||
16392                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16393                         tg3_flag_set(tp, 1SHOT_MSI);
16394                 }
16395
16396                 if (tg3_flag(tp, 57765_PLUS)) {
16397                         tg3_flag_set(tp, SUPPORT_MSIX);
16398                         tp->irq_max = TG3_IRQ_MAX_VECS;
16399                 }
16400         }
16401
16402         tp->txq_max = 1;
16403         tp->rxq_max = 1;
16404         if (tp->irq_max > 1) {
16405                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16406                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16407
16408                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16409                     tg3_asic_rev(tp) == ASIC_REV_5720)
16410                         tp->txq_max = tp->irq_max - 1;
16411         }
16412
16413         if (tg3_flag(tp, 5755_PLUS) ||
16414             tg3_asic_rev(tp) == ASIC_REV_5906)
16415                 tg3_flag_set(tp, SHORT_DMA_BUG);
16416
16417         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16418                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16419
16420         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16421             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16422             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16423             tg3_asic_rev(tp) == ASIC_REV_5762)
16424                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16425
16426         if (tg3_flag(tp, 57765_PLUS) &&
16427             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16428                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16429
16430         if (!tg3_flag(tp, 5705_PLUS) ||
16431             tg3_flag(tp, 5780_CLASS) ||
16432             tg3_flag(tp, USE_JUMBO_BDFLAG))
16433                 tg3_flag_set(tp, JUMBO_CAPABLE);
16434
16435         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16436                               &pci_state_reg);
16437
16438         if (pci_is_pcie(tp->pdev)) {
16439                 u16 lnkctl;
16440
16441                 tg3_flag_set(tp, PCI_EXPRESS);
16442
16443                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16444                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16445                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16446                                 tg3_flag_clear(tp, HW_TSO_2);
16447                                 tg3_flag_clear(tp, TSO_CAPABLE);
16448                         }
16449                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16450                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16451                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16452                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16453                                 tg3_flag_set(tp, CLKREQ_BUG);
16454                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16455                         tg3_flag_set(tp, L1PLLPD_EN);
16456                 }
16457         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16458                 /* BCM5785 devices are effectively PCIe devices, and should
16459                  * follow PCIe codepaths, but do not have a PCIe capabilities
16460                  * section.
16461                  */
16462                 tg3_flag_set(tp, PCI_EXPRESS);
16463         } else if (!tg3_flag(tp, 5705_PLUS) ||
16464                    tg3_flag(tp, 5780_CLASS)) {
16465                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16466                 if (!tp->pcix_cap) {
16467                         dev_err(&tp->pdev->dev,
16468                                 "Cannot find PCI-X capability, aborting\n");
16469                         return -EIO;
16470                 }
16471
16472                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16473                         tg3_flag_set(tp, PCIX_MODE);
16474         }
16475
16476         /* If we have an AMD 762 or VIA K8T800 chipset, write
16477          * reordering to the mailbox registers done by the host
16478          * controller can cause major troubles.  We read back from
16479          * every mailbox register write to force the writes to be
16480          * posted to the chip in order.
16481          */
16482         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16483             !tg3_flag(tp, PCI_EXPRESS))
16484                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16485
16486         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16487                              &tp->pci_cacheline_sz);
16488         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16489                              &tp->pci_lat_timer);
16490         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16491             tp->pci_lat_timer < 64) {
16492                 tp->pci_lat_timer = 64;
16493                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16494                                       tp->pci_lat_timer);
16495         }
16496
16497         /* Important! -- It is critical that the PCI-X hw workaround
16498          * situation is decided before the first MMIO register access.
16499          */
16500         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16501                 /* 5700 BX chips need to have their TX producer index
16502                  * mailboxes written twice to workaround a bug.
16503                  */
16504                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16505
16506                 /* If we are in PCI-X mode, enable register write workaround.
16507                  *
16508                  * The workaround is to use indirect register accesses
16509                  * for all chip writes not to mailbox registers.
16510                  */
16511                 if (tg3_flag(tp, PCIX_MODE)) {
16512                         u32 pm_reg;
16513
16514                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16515
16516                         /* The chip can have its power management PCI config
16517                          * space registers clobbered due to this bug.
16518                          * So explicitly force the chip into D0 here.
16519                          */
16520                         pci_read_config_dword(tp->pdev,
16521                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16522                                               &pm_reg);
16523                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16524                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16525                         pci_write_config_dword(tp->pdev,
16526                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16527                                                pm_reg);
16528
16529                         /* Also, force SERR#/PERR# in PCI command. */
16530                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16531                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16532                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16533                 }
16534         }
16535
16536         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16537                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16538         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16539                 tg3_flag_set(tp, PCI_32BIT);
16540
16541         /* Chip-specific fixup from Broadcom driver */
16542         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16543             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16544                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16545                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16546         }
16547
16548         /* Default fast path register access methods */
16549         tp->read32 = tg3_read32;
16550         tp->write32 = tg3_write32;
16551         tp->read32_mbox = tg3_read32;
16552         tp->write32_mbox = tg3_write32;
16553         tp->write32_tx_mbox = tg3_write32;
16554         tp->write32_rx_mbox = tg3_write32;
16555
16556         /* Various workaround register access methods */
16557         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16558                 tp->write32 = tg3_write_indirect_reg32;
16559         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16560                  (tg3_flag(tp, PCI_EXPRESS) &&
16561                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16562                 /*
16563                  * Back to back register writes can cause problems on these
16564                  * chips, the workaround is to read back all reg writes
16565                  * except those to mailbox regs.
16566                  *
16567                  * See tg3_write_indirect_reg32().
16568                  */
16569                 tp->write32 = tg3_write_flush_reg32;
16570         }
16571
16572         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16573                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16574                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16575                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16576         }
16577
16578         if (tg3_flag(tp, ICH_WORKAROUND)) {
16579                 tp->read32 = tg3_read_indirect_reg32;
16580                 tp->write32 = tg3_write_indirect_reg32;
16581                 tp->read32_mbox = tg3_read_indirect_mbox;
16582                 tp->write32_mbox = tg3_write_indirect_mbox;
16583                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16584                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16585
16586                 iounmap(tp->regs);
16587                 tp->regs = NULL;
16588
16589                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16590                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16591                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16592         }
16593         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16594                 tp->read32_mbox = tg3_read32_mbox_5906;
16595                 tp->write32_mbox = tg3_write32_mbox_5906;
16596                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16597                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16598         }
16599
16600         if (tp->write32 == tg3_write_indirect_reg32 ||
16601             (tg3_flag(tp, PCIX_MODE) &&
16602              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16603               tg3_asic_rev(tp) == ASIC_REV_5701)))
16604                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16605
16606         /* The memory arbiter has to be enabled in order for SRAM accesses
16607          * to succeed.  Normally on powerup the tg3 chip firmware will make
16608          * sure it is enabled, but other entities such as system netboot
16609          * code might disable it.
16610          */
16611         val = tr32(MEMARB_MODE);
16612         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16613
16614         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16615         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16616             tg3_flag(tp, 5780_CLASS)) {
16617                 if (tg3_flag(tp, PCIX_MODE)) {
16618                         pci_read_config_dword(tp->pdev,
16619                                               tp->pcix_cap + PCI_X_STATUS,
16620                                               &val);
16621                         tp->pci_fn = val & 0x7;
16622                 }
16623         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16624                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16625                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16626                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16627                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16628                         val = tr32(TG3_CPMU_STATUS);
16629
16630                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16631                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16632                 else
16633                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16634                                      TG3_CPMU_STATUS_FSHFT_5719;
16635         }
16636
16637         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16638                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16639                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16640         }
16641
16642         /* Get eeprom hw config before calling tg3_set_power_state().
16643          * In particular, the TG3_FLAG_IS_NIC flag must be
16644          * determined before calling tg3_set_power_state() so that
16645          * we know whether or not to switch out of Vaux power.
16646          * When the flag is set, it means that GPIO1 is used for eeprom
16647          * write protect and also implies that it is a LOM where GPIOs
16648          * are not used to switch power.
16649          */
16650         tg3_get_eeprom_hw_cfg(tp);
16651
16652         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16653                 tg3_flag_clear(tp, TSO_CAPABLE);
16654                 tg3_flag_clear(tp, TSO_BUG);
16655                 tp->fw_needed = NULL;
16656         }
16657
16658         if (tg3_flag(tp, ENABLE_APE)) {
16659                 /* Allow reads and writes to the
16660                  * APE register and memory space.
16661                  */
16662                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16663                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16664                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16665                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16666                                        pci_state_reg);
16667
16668                 tg3_ape_lock_init(tp);
16669                 tp->ape_hb_interval =
16670                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16671         }
16672
16673         /* Set up tp->grc_local_ctrl before calling
16674          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16675          * will bring 5700's external PHY out of reset.
16676          * It is also used as eeprom write protect on LOMs.
16677          */
16678         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16679         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16680             tg3_flag(tp, EEPROM_WRITE_PROT))
16681                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16682                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16683         /* Unused GPIO3 must be driven as output on 5752 because there
16684          * are no pull-up resistors on unused GPIO pins.
16685          */
16686         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16687                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16688
16689         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16690             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16691             tg3_flag(tp, 57765_CLASS))
16692                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16693
16694         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16695             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16696                 /* Turn off the debug UART. */
16697                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16698                 if (tg3_flag(tp, IS_NIC))
16699                         /* Keep VMain power. */
16700                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16701                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16702         }
16703
16704         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16705                 tp->grc_local_ctrl |=
16706                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16707
16708         /* Switch out of Vaux if it is a NIC */
16709         tg3_pwrsrc_switch_to_vmain(tp);
16710
16711         /* Derive initial jumbo mode from MTU assigned in
16712          * ether_setup() via the alloc_etherdev() call
16713          */
16714         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16715                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16716
16717         /* Determine WakeOnLan speed to use. */
16718         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16719             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16720             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16721             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16722                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16723         } else {
16724                 tg3_flag_set(tp, WOL_SPEED_100MB);
16725         }
16726
16727         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16728                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16729
16730         /* A few boards don't want the Ethernet@WireSpeed phy feature */
16731         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16732             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16733              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16734              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16735             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16736             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16737                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16738
16739         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16740             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16741                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16742         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16743                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16744
16745         if (tg3_flag(tp, 5705_PLUS) &&
16746             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16747             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16748             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16749             !tg3_flag(tp, 57765_PLUS)) {
16750                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16751                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16752                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16753                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16754                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16755                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16756                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16757                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16758                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16759                 } else
16760                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16761         }
16762
16763         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16764             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16765                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16766                 if (tp->phy_otp == 0)
16767                         tp->phy_otp = TG3_OTP_DEFAULT;
16768         }
16769
16770         if (tg3_flag(tp, CPMU_PRESENT))
16771                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16772         else
16773                 tp->mi_mode = MAC_MI_MODE_BASE;
16774
16775         tp->coalesce_mode = 0;
16776         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16777             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16778                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16779
16780         /* Set these bits to enable the statistics workaround. */
16781         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16782             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16783             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16784             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16785                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16786                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16787         }
16788
16789         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16790             tg3_asic_rev(tp) == ASIC_REV_57780)
16791                 tg3_flag_set(tp, USE_PHYLIB);
16792
16793         err = tg3_mdio_init(tp);
16794         if (err)
16795                 return err;
16796
16797         /* Initialize data/descriptor byte/word swapping. */
16798         val = tr32(GRC_MODE);
16799         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16800             tg3_asic_rev(tp) == ASIC_REV_5762)
16801                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16802                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16803                         GRC_MODE_B2HRX_ENABLE |
16804                         GRC_MODE_HTX2B_ENABLE |
16805                         GRC_MODE_HOST_STACKUP);
16806         else
16807                 val &= GRC_MODE_HOST_STACKUP;
16808
16809         tw32(GRC_MODE, val | tp->grc_mode);
16810
16811         tg3_switch_clocks(tp);
16812
16813         /* Clear this out for sanity. */
16814         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16815
16816         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16817         tw32(TG3PCI_REG_BASE_ADDR, 0);
16818
16819         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16820                               &pci_state_reg);
16821         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16822             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16823                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16824                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16825                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16826                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16827                         void __iomem *sram_base;
16828
16829                         /* Write some dummy words into the SRAM status block
16830                          * area and see if it reads back correctly.  If the
16831                          * readback is bad, force-enable the PCIX workaround.
16832                          */
16833                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16834
16835                         writel(0x00000000, sram_base);
16836                         writel(0x00000000, sram_base + 4);
16837                         writel(0xffffffff, sram_base + 4);
16838                         if (readl(sram_base) != 0x00000000)
16839                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16840                 }
16841         }
16842
16843         udelay(50);
16844         tg3_nvram_init(tp);
16845
16846         /* If the device has an NVRAM, no need to load patch firmware */
16847         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16848             !tg3_flag(tp, NO_NVRAM))
16849                 tp->fw_needed = NULL;
16850
16851         grc_misc_cfg = tr32(GRC_MISC_CFG);
16852         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16853
16854         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16855             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16856              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16857                 tg3_flag_set(tp, IS_5788);
16858
16859         if (!tg3_flag(tp, IS_5788) &&
16860             tg3_asic_rev(tp) != ASIC_REV_5700)
16861                 tg3_flag_set(tp, TAGGED_STATUS);
16862         if (tg3_flag(tp, TAGGED_STATUS)) {
16863                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16864                                       HOSTCC_MODE_CLRTICK_TXBD);
16865
16866                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16867                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16868                                        tp->misc_host_ctrl);
16869         }
16870
16871         /* Preserve the APE MAC_MODE bits */
16872         if (tg3_flag(tp, ENABLE_APE))
16873                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16874         else
16875                 tp->mac_mode = 0;
16876
16877         if (tg3_10_100_only_device(tp, ent))
16878                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16879
16880         err = tg3_phy_probe(tp);
16881         if (err) {
16882                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16883                 /* ... but do not return immediately ... */
16884                 tg3_mdio_fini(tp);
16885         }
16886
16887         tg3_read_vpd(tp);
16888         tg3_read_fw_ver(tp);
16889
16890         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16891                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16892         } else {
16893                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16894                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16895                 else
16896                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16897         }
16898
16899         /* 5700 {AX,BX} chips have a broken status block link
16900          * change bit implementation, so we must use the
16901          * status register in those cases.
16902          */
16903         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16904                 tg3_flag_set(tp, USE_LINKCHG_REG);
16905         else
16906                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16907
16908         /* The led_ctrl is set during tg3_phy_probe; here we might
16909          * have to force the link status polling mechanism based
16910          * upon subsystem IDs.
16911          */
16912         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16913             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16914             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16915                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16916                 tg3_flag_set(tp, USE_LINKCHG_REG);
16917         }
16918
16919         /* For all SERDES we poll the MAC status register. */
16920         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16921                 tg3_flag_set(tp, POLL_SERDES);
16922         else
16923                 tg3_flag_clear(tp, POLL_SERDES);
16924
16925         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16926                 tg3_flag_set(tp, POLL_CPMU_LINK);
16927
16928         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16929         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
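        /* The 5701 in PCI-X mode apparently cannot DMA into a buffer
         * shifted by the usual 2-byte NET_IP_ALIGN, so drop the alignment
         * and, on platforms without efficient unaligned access, copy every
         * received packet (threshold of ~0) so the stack still sees
         * aligned headers.
         */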
16930         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16931             tg3_flag(tp, PCIX_MODE)) {
16932                 tp->rx_offset = NET_SKB_PAD;
16933 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16934                 tp->rx_copy_thresh = ~(u16)0;
16935 #endif
16936         }
16937
16938         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16939         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16940         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16941
16942         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16943
16944         /* Increment the rx prod index on the rx std ring by at most
16945          * 8 for these chips to workaround hw errata.
16946          * 8 for these chips to work around hw errata.
16947         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16948             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16949             tg3_asic_rev(tp) == ASIC_REV_5755)
16950                 tp->rx_std_max_post = 8;
16951
16952         if (tg3_flag(tp, ASPM_WORKAROUND))
16953                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16954                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16955
16956         return err;
16957 }
16958
16959 #ifdef CONFIG_SPARC
16960 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16961 {
16962         struct net_device *dev = tp->dev;
16963         struct pci_dev *pdev = tp->pdev;
16964         struct device_node *dp = pci_device_to_OF_node(pdev);
16965         const unsigned char *addr;
16966         int len;
16967
16968         addr = of_get_property(dp, "local-mac-address", &len);
16969         if (addr && len == ETH_ALEN) {
16970                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16971                 return 0;
16972         }
16973         return -ENODEV;
16974 }
16975
16976 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16977 {
16978         struct net_device *dev = tp->dev;
16979
16980         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16981         return 0;
16982 }
16983 #endif
16984
16985 static int tg3_get_device_address(struct tg3 *tp)
16986 {
16987         struct net_device *dev = tp->dev;
16988         u32 hi, lo, mac_offset;
16989         int addr_ok = 0;
16990         int err;
16991
16992 #ifdef CONFIG_SPARC
16993         if (!tg3_get_macaddr_sparc(tp))
16994                 return 0;
16995 #endif
16996
16997         if (tg3_flag(tp, IS_SSB_CORE)) {
16998                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16999                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17000                         return 0;
17001         }
17002
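        /* 0x7c is the usual NVRAM offset of the MAC address; the checks
         * below move it for dual-MAC, multi-function and 5906 devices.
         */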
17003         mac_offset = 0x7c;
17004         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17005             tg3_flag(tp, 5780_CLASS)) {
17006                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17007                         mac_offset = 0xcc;
17008                 if (tg3_nvram_lock(tp))
17009                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17010                 else
17011                         tg3_nvram_unlock(tp);
17012         } else if (tg3_flag(tp, 5717_PLUS)) {
17013                 if (tp->pci_fn & 1)
17014                         mac_offset = 0xcc;
17015                 if (tp->pci_fn > 1)
17016                         mac_offset += 0x18c;
17017         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17018                 mac_offset = 0x10;
17019
17020         /* First try to get it from MAC address mailbox. */
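        /* 0x484b is ASCII "HK", apparently a bootcode signature marking
         * the mailbox contents as a valid MAC address.
         */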
17021         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17022         if ((hi >> 16) == 0x484b) {
17023                 dev->dev_addr[0] = (hi >>  8) & 0xff;
17024                 dev->dev_addr[1] = (hi >>  0) & 0xff;
17025
17026                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17027                 dev->dev_addr[2] = (lo >> 24) & 0xff;
17028                 dev->dev_addr[3] = (lo >> 16) & 0xff;
17029                 dev->dev_addr[4] = (lo >>  8) & 0xff;
17030                 dev->dev_addr[5] = (lo >>  0) & 0xff;
17031
17032                 /* Some old bootcode may report a 0 MAC address in SRAM */
17033                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17034         }
17035         if (!addr_ok) {
17036                 /* Next, try NVRAM. */
17037                 if (!tg3_flag(tp, NO_NVRAM) &&
17038                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17039                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
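                        /* NVRAM words are big-endian: the MAC's first two
                         * bytes are the last two bytes of "hi", and the
                         * remaining four fill "lo".
                         */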
17040                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17041                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17042                 }
17043                 /* Finally just fetch it out of the MAC control regs. */
17044                 else {
17045                         hi = tr32(MAC_ADDR_0_HIGH);
17046                         lo = tr32(MAC_ADDR_0_LOW);
17047
17048                         dev->dev_addr[5] = lo & 0xff;
17049                         dev->dev_addr[4] = (lo >> 8) & 0xff;
17050                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17051                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17052                         dev->dev_addr[1] = hi & 0xff;
17053                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17054                 }
17055         }
17056
17057         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17058 #ifdef CONFIG_SPARC
17059                 if (!tg3_get_default_macaddr_sparc(tp))
17060                         return 0;
17061 #endif
17062                 return -EINVAL;
17063         }
17064         return 0;
17065 }
17066
17067 #define BOUNDARY_SINGLE_CACHELINE       1
17068 #define BOUNDARY_MULTI_CACHELINE        2
17069
17070 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17071 {
17072         int cacheline_size;
17073         u8 byte;
17074         int goal;
17075
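        /* PCI_CACHE_LINE_SIZE counts 32-bit words, hence the multiply by
         * four below (e.g. a reported 0x10 means 64 bytes).  Zero means
         * the register was never programmed, so a conservative 1024-byte
         * line is assumed.
         */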
17076         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17077         if (byte == 0)
17078                 cacheline_size = 1024;
17079         else
17080                 cacheline_size = (int) byte * 4;
17081
17082         /* On 5703 and later chips, the boundary bits have no
17083          * effect.
17084          */
17085         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17086             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17087             !tg3_flag(tp, PCI_EXPRESS))
17088                 goto out;
17089
17090 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17091         goal = BOUNDARY_MULTI_CACHELINE;
17092 #else
17093 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17094         goal = BOUNDARY_SINGLE_CACHELINE;
17095 #else
17096         goal = 0;
17097 #endif
17098 #endif
17099
17100         if (tg3_flag(tp, 57765_PLUS)) {
17101                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17102                 goto out;
17103         }
17104
17105         if (!goal)
17106                 goto out;
17107
17108         /* PCI controllers on most RISC systems tend to disconnect
17109          * when a device tries to burst across a cache-line boundary.
17110          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17111          *
17112          * Unfortunately, for PCI-E there are only limited
17113          * write-side controls for this, and thus for reads
17114          * we will still get the disconnects.  We'll also waste
17115          * these PCI cycles for both read and write for chips
17116          * other than 5700 and 5701 which do not implement the
17117          * boundary bits.
17118          */
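        /* One worked path, as a sketch: sparc64 selects
         * goal = BOUNDARY_SINGLE_CACHELINE above, so with a 64-byte cache
         * line on plain PCI the final switch below yields
         * DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64.
         */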
17119         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17120                 switch (cacheline_size) {
17121                 case 16:
17122                 case 32:
17123                 case 64:
17124                 case 128:
17125                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17126                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17127                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17128                         } else {
17129                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17130                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17131                         }
17132                         break;
17133
17134                 case 256:
17135                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17136                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17137                         break;
17138
17139                 default:
17140                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17141                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17142                         break;
17143                 }
17144         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17145                 switch (cacheline_size) {
17146                 case 16:
17147                 case 32:
17148                 case 64:
17149                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17150                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17151                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17152                                 break;
17153                         }
17154                         /* fallthrough */
17155                 case 128:
17156                 default:
17157                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17158                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17159                         break;
17160                 }
17161         } else {
17162                 switch (cacheline_size) {
17163                 case 16:
17164                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17165                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17166                                         DMA_RWCTRL_WRITE_BNDRY_16);
17167                                 break;
17168                         }
17169                         /* fallthrough */
17170                 case 32:
17171                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17172                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17173                                         DMA_RWCTRL_WRITE_BNDRY_32);
17174                                 break;
17175                         }
17176                         /* fallthrough */
17177                 case 64:
17178                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17179                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17180                                         DMA_RWCTRL_WRITE_BNDRY_64);
17181                                 break;
17182                         }
17183                         /* fallthrough */
17184                 case 128:
17185                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17186                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17187                                         DMA_RWCTRL_WRITE_BNDRY_128);
17188                                 break;
17189                         }
17190                         /* fallthrough */
17191                 case 256:
17192                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17193                                 DMA_RWCTRL_WRITE_BNDRY_256);
17194                         break;
17195                 case 512:
17196                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17197                                 DMA_RWCTRL_WRITE_BNDRY_512);
17198                         break;
17199                 case 1024:
17200                 default:
17201                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17202                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17203                         break;
17204                 }
17205         }
17206
17207 out:
17208         return val;
17209 }
17210
17211 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17212                            int size, bool to_device)
17213 {
17214         struct tg3_internal_buffer_desc test_desc;
17215         u32 sram_dma_descs;
17216         int i, ret;
17217
17218         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17219
17220         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17221         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17222         tw32(RDMAC_STATUS, 0);
17223         tw32(WDMAC_STATUS, 0);
17224
17225         tw32(BUFMGR_MODE, 0);
17226         tw32(FTQ_RESET, 0);
17227
17228         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17229         test_desc.addr_lo = buf_dma & 0xffffffff;
17230         test_desc.nic_mbuf = 0x00002100;
17231         test_desc.len = size;
17232
17233         /*
17234          * HP ZX1 systems were seeing test failures for 5701 cards running
17235          * at 33MHz the *second* time the tg3 driver was loaded after an
17236          * initial scan.
17237          *
17238          * Broadcom tells me:
17239          *   ...the DMA engine is connected to the GRC block and a DMA
17240          *   reset may affect the GRC block in some unpredictable way...
17241          *   The behavior of resets to individual blocks has not been tested.
17242          *
17243          * Broadcom noted the GRC reset will also reset all sub-components.
17244          */
17245         if (to_device) {
17246                 test_desc.cqid_sqid = (13 << 8) | 2;
17247
17248                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17249                 udelay(40);
17250         } else {
17251                 test_desc.cqid_sqid = (16 << 8) | 7;
17252
17253                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17254                 udelay(40);
17255         }
17256         test_desc.flags = 0x00000005;
17257
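        /* Copy the descriptor into NIC SRAM one 32-bit word at a time
         * through the config-space memory window.
         */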
17258         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17259                 u32 val;
17260
17261                 val = *(((u32 *)&test_desc) + i);
17262                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17263                                        sram_dma_descs + (i * sizeof(u32)));
17264                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17265         }
17266         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17267
17268         if (to_device)
17269                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17270         else
17271                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17272
17273         ret = -ENODEV;
17274         for (i = 0; i < 40; i++) {
17275                 u32 val;
17276
17277                 if (to_device)
17278                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17279                 else
17280                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17281                 if ((val & 0xffff) == sram_dma_descs) {
17282                         ret = 0;
17283                         break;
17284                 }
17285
17286                 udelay(100);
17287         }
17288
17289         return ret;
17290 }
17291
17292 #define TEST_BUFFER_SIZE        0x2000
17293
17294 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17295         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17296         { },
17297 };
17298
17299 static int tg3_test_dma(struct tg3 *tp)
17300 {
17301         dma_addr_t buf_dma;
17302         u32 *buf, saved_dma_rwctrl;
17303         int ret = 0;
17304
17305         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17306                                  &buf_dma, GFP_KERNEL);
17307         if (!buf) {
17308                 ret = -ENOMEM;
17309                 goto out_nofree;
17310         }
17311
17312         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17313                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17314
17315         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17316
17317         if (tg3_flag(tp, 57765_PLUS))
17318                 goto out;
17319
17320         if (tg3_flag(tp, PCI_EXPRESS)) {
17321                 /* DMA read watermark not used on PCIE */
17322                 tp->dma_rwctrl |= 0x00180000;
17323         } else if (!tg3_flag(tp, PCIX_MODE)) {
17324                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17325                     tg3_asic_rev(tp) == ASIC_REV_5750)
17326                         tp->dma_rwctrl |= 0x003f0000;
17327                 else
17328                         tp->dma_rwctrl |= 0x003f000f;
17329         } else {
17330                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17331                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17332                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17333                         u32 read_water = 0x7;
17334
17335                         /* If the 5704 is behind the EPB bridge, we can
17336                          * do the less restrictive ONE_DMA workaround for
17337                          * better performance.
17338                          */
17339                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17340                             tg3_asic_rev(tp) == ASIC_REV_5704)
17341                                 tp->dma_rwctrl |= 0x8000;
17342                         else if (ccval == 0x6 || ccval == 0x7)
17343                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17344
17345                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17346                                 read_water = 4;
17347                         /* Set bit 23 to enable PCIX hw bug fix */
17348                         tp->dma_rwctrl |=
17349                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17350                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17351                                 (1 << 23);
17352                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17353                         /* 5780 always in PCIX mode */
17354                         tp->dma_rwctrl |= 0x00144000;
17355                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17356                         /* 5714 always in PCIX mode */
17357                         tp->dma_rwctrl |= 0x00148000;
17358                 } else {
17359                         tp->dma_rwctrl |= 0x001b000f;
17360                 }
17361         }
17362         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17363                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17364
17365         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17366             tg3_asic_rev(tp) == ASIC_REV_5704)
17367                 tp->dma_rwctrl &= 0xfffffff0;
17368
17369         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17370             tg3_asic_rev(tp) == ASIC_REV_5701) {
17371                 /* Remove this if it causes problems for some boards. */
17372                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17373
17374                 /* On 5700/5701 chips, we need to set this bit.
17375                  * Otherwise the chip will issue cacheline transactions
17376                  * to streamable DMA memory with not all the byte
17377                  * enables turned on.  This is an error on several
17378                  * RISC PCI controllers, in particular sparc64.
17379                  *
17380                  * On 5703/5704 chips, this bit has been reassigned
17381                  * a different meaning.  In particular, it is used
17382                  * on those chips to enable a PCI-X workaround.
17383                  */
17384                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17385         }
17386
17387         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17388
17389
17390         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17391             tg3_asic_rev(tp) != ASIC_REV_5701)
17392                 goto out;
17393
17394         /* It is best to perform the DMA test with maximum write burst size
17395          * to expose the 5700/5701 write DMA bug.
17396          */
17397         saved_dma_rwctrl = tp->dma_rwctrl;
17398         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17399         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17400
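        /* Fill the buffer with a known pattern, DMA it to the chip and
         * back, then verify it.  On corruption, retry once with the
         * 16-byte write boundary workaround before giving up.
         */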
17401         while (1) {
17402                 u32 *p = buf, i;
17403
17404                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17405                         p[i] = i;
17406
17407                 /* Send the buffer to the chip. */
17408                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17409                 if (ret) {
17410                         dev_err(&tp->pdev->dev,
17411                                 "%s: Buffer write failed. err = %d\n",
17412                                 __func__, ret);
17413                         break;
17414                 }
17415
17416                 /* Now read it back. */
17417                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17418                 if (ret) {
17419                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17420                                 "err = %d\n", __func__, ret);
17421                         break;
17422                 }
17423
17424                 /* Verify it. */
17425                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17426                         if (p[i] == i)
17427                                 continue;
17428
17429                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17430                             DMA_RWCTRL_WRITE_BNDRY_16) {
17431                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17432                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17433                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17434                                 break;
17435                         } else {
17436                                 dev_err(&tp->pdev->dev,
17437                                         "%s: Buffer corrupted on read back! "
17438                                         "(%d != %d)\n", __func__, p[i], i);
17439                                 ret = -ENODEV;
17440                                 goto out;
17441                         }
17442                 }
17443
17444                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17445                         /* Success. */
17446                         ret = 0;
17447                         break;
17448                 }
17449         }
17450         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17451             DMA_RWCTRL_WRITE_BNDRY_16) {
17452                 /* The DMA test passed without adjusting the DMA boundary;
17453                  * now look for chipsets that are known to expose the
17454                  * DMA bug without failing the test.
17455                  */
17456                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17457                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17458                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17459                 } else {
17460                         /* Safe to use the calculated DMA boundary. */
17461                         tp->dma_rwctrl = saved_dma_rwctrl;
17462                 }
17463
17464                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17465         }
17466
17467 out:
17468         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17469 out_nofree:
17470         return ret;
17471 }
17472
17473 static void tg3_init_bufmgr_config(struct tg3 *tp)
17474 {
17475         if (tg3_flag(tp, 57765_PLUS)) {
17476                 tp->bufmgr_config.mbuf_read_dma_low_water =
17477                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17478                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17479                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17480                 tp->bufmgr_config.mbuf_high_water =
17481                         DEFAULT_MB_HIGH_WATER_57765;
17482
17483                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17484                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17485                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17486                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17487                 tp->bufmgr_config.mbuf_high_water_jumbo =
17488                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17489         } else if (tg3_flag(tp, 5705_PLUS)) {
17490                 tp->bufmgr_config.mbuf_read_dma_low_water =
17491                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17492                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17493                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17494                 tp->bufmgr_config.mbuf_high_water =
17495                         DEFAULT_MB_HIGH_WATER_5705;
17496                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17497                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17498                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17499                         tp->bufmgr_config.mbuf_high_water =
17500                                 DEFAULT_MB_HIGH_WATER_5906;
17501                 }
17502
17503                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17504                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17505                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17506                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17507                 tp->bufmgr_config.mbuf_high_water_jumbo =
17508                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17509         } else {
17510                 tp->bufmgr_config.mbuf_read_dma_low_water =
17511                         DEFAULT_MB_RDMA_LOW_WATER;
17512                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17513                         DEFAULT_MB_MACRX_LOW_WATER;
17514                 tp->bufmgr_config.mbuf_high_water =
17515                         DEFAULT_MB_HIGH_WATER;
17516
17517                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17518                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17519                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17520                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17521                 tp->bufmgr_config.mbuf_high_water_jumbo =
17522                         DEFAULT_MB_HIGH_WATER_JUMBO;
17523         }
17524
17525         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17526         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17527 }
17528
17529 static char *tg3_phy_string(struct tg3 *tp)
17530 {
17531         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17532         case TG3_PHY_ID_BCM5400:        return "5400";
17533         case TG3_PHY_ID_BCM5401:        return "5401";
17534         case TG3_PHY_ID_BCM5411:        return "5411";
17535         case TG3_PHY_ID_BCM5701:        return "5701";
17536         case TG3_PHY_ID_BCM5703:        return "5703";
17537         case TG3_PHY_ID_BCM5704:        return "5704";
17538         case TG3_PHY_ID_BCM5705:        return "5705";
17539         case TG3_PHY_ID_BCM5750:        return "5750";
17540         case TG3_PHY_ID_BCM5752:        return "5752";
17541         case TG3_PHY_ID_BCM5714:        return "5714";
17542         case TG3_PHY_ID_BCM5780:        return "5780";
17543         case TG3_PHY_ID_BCM5755:        return "5755";
17544         case TG3_PHY_ID_BCM5787:        return "5787";
17545         case TG3_PHY_ID_BCM5784:        return "5784";
17546         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17547         case TG3_PHY_ID_BCM5906:        return "5906";
17548         case TG3_PHY_ID_BCM5761:        return "5761";
17549         case TG3_PHY_ID_BCM5718C:       return "5718C";
17550         case TG3_PHY_ID_BCM5718S:       return "5718S";
17551         case TG3_PHY_ID_BCM57765:       return "57765";
17552         case TG3_PHY_ID_BCM5719C:       return "5719C";
17553         case TG3_PHY_ID_BCM5720C:       return "5720C";
17554         case TG3_PHY_ID_BCM5762:        return "5762C";
17555         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17556         case 0:                 return "serdes";
17557         default:                return "unknown";
17558         }
17559 }
17560
17561 static char *tg3_bus_string(struct tg3 *tp, char *str)
17562 {
17563         if (tg3_flag(tp, PCI_EXPRESS)) {
17564                 strcpy(str, "PCI Express");
17565                 return str;
17566         } else if (tg3_flag(tp, PCIX_MODE)) {
17567                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17568
17569                 strcpy(str, "PCIX:");
17570
17571                 if ((clock_ctrl == 7) ||
17572                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17573                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17574                         strcat(str, "133MHz");
17575                 else if (clock_ctrl == 0)
17576                         strcat(str, "33MHz");
17577                 else if (clock_ctrl == 2)
17578                         strcat(str, "50MHz");
17579                 else if (clock_ctrl == 4)
17580                         strcat(str, "66MHz");
17581                 else if (clock_ctrl == 6)
17582                         strcat(str, "100MHz");
17583         } else {
17584                 strcpy(str, "PCI:");
17585                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17586                         strcat(str, "66MHz");
17587                 else
17588                         strcat(str, "33MHz");
17589         }
17590         if (tg3_flag(tp, PCI_32BIT))
17591                 strcat(str, ":32-bit");
17592         else
17593                 strcat(str, ":64-bit");
17594         return str;
17595 }
17596
17597 static void tg3_init_coal(struct tg3 *tp)
17598 {
17599         struct ethtool_coalesce *ec = &tp->coal;
17600
17601         memset(ec, 0, sizeof(*ec));
17602         ec->cmd = ETHTOOL_GCOALESCE;
17603         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17604         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17605         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17606         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17607         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17608         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17609         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17610         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17611         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17612
17613         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17614                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17615                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17616                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17617                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17618                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17619         }
17620
17621         if (tg3_flag(tp, 5705_PLUS)) {
17622                 ec->rx_coalesce_usecs_irq = 0;
17623                 ec->tx_coalesce_usecs_irq = 0;
17624                 ec->stats_block_coalesce_usecs = 0;
17625         }
17626 }
17627
17628 static int tg3_init_one(struct pci_dev *pdev,
17629                                   const struct pci_device_id *ent)
17630 {
17631         struct net_device *dev;
17632         struct tg3 *tp;
17633         int i, err;
17634         u32 sndmbx, rcvmbx, intmbx;
17635         char str[40];
17636         u64 dma_mask, persist_dma_mask;
17637         netdev_features_t features = 0;
17638
17639         printk_once(KERN_INFO "%s\n", version);
17640
17641         err = pci_enable_device(pdev);
17642         if (err) {
17643                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17644                 return err;
17645         }
17646
17647         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17648         if (err) {
17649                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17650                 goto err_out_disable_pdev;
17651         }
17652
17653         pci_set_master(pdev);
17654
17655         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17656         if (!dev) {
17657                 err = -ENOMEM;
17658                 goto err_out_free_res;
17659         }
17660
17661         SET_NETDEV_DEV(dev, &pdev->dev);
17662
17663         tp = netdev_priv(dev);
17664         tp->pdev = pdev;
17665         tp->dev = dev;
17666         tp->rx_mode = TG3_DEF_RX_MODE;
17667         tp->tx_mode = TG3_DEF_TX_MODE;
17668         tp->irq_sync = 1;
17669         tp->pcierr_recovery = false;
17670
17671         if (tg3_debug > 0)
17672                 tp->msg_enable = tg3_debug;
17673         else
17674                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17675
17676         if (pdev_is_ssb_gige_core(pdev)) {
17677                 tg3_flag_set(tp, IS_SSB_CORE);
17678                 if (ssb_gige_must_flush_posted_writes(pdev))
17679                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17680                 if (ssb_gige_one_dma_at_once(pdev))
17681                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17682                 if (ssb_gige_have_roboswitch(pdev)) {
17683                         tg3_flag_set(tp, USE_PHYLIB);
17684                         tg3_flag_set(tp, ROBOSWITCH);
17685                 }
17686                 if (ssb_gige_is_rgmii(pdev))
17687                         tg3_flag_set(tp, RGMII_MODE);
17688         }
17689
17690         /* The word/byte swap controls here control register access byte
17691          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17692          * setting below.
17693          */
17694         tp->misc_host_ctrl =
17695                 MISC_HOST_CTRL_MASK_PCI_INT |
17696                 MISC_HOST_CTRL_WORD_SWAP |
17697                 MISC_HOST_CTRL_INDIR_ACCESS |
17698                 MISC_HOST_CTRL_PCISTATE_RW;
17699
17700         /* The NONFRM (non-frame) byte/word swap controls take effect
17701          * on descriptor entries, i.e. anything which isn't packet data.
17702          *
17703          * The StrongARM chips on the board (one for tx, one for rx)
17704          * are running in big-endian mode.
17705          */
17706         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17707                         GRC_MODE_WSWAP_NONFRM_DATA);
17708 #ifdef __BIG_ENDIAN
17709         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17710 #endif
17711         spin_lock_init(&tp->lock);
17712         spin_lock_init(&tp->indirect_lock);
17713         INIT_WORK(&tp->reset_task, tg3_reset_task);
17714
17715         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17716         if (!tp->regs) {
17717                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17718                 err = -ENOMEM;
17719                 goto err_out_free_dev;
17720         }
17721
17722         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17723             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17724             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17725             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17726             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17727             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17728             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17729             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17730             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17731             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17732             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17733             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17734             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17735             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17736             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17737                 tg3_flag_set(tp, ENABLE_APE);
17738                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17739                 if (!tp->aperegs) {
17740                         dev_err(&pdev->dev,
17741                                 "Cannot map APE registers, aborting\n");
17742                         err = -ENOMEM;
17743                         goto err_out_iounmap;
17744                 }
17745         }
17746
17747         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17748         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17749
17750         dev->ethtool_ops = &tg3_ethtool_ops;
17751         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17752         dev->netdev_ops = &tg3_netdev_ops;
17753         dev->irq = pdev->irq;
17754
17755         err = tg3_get_invariants(tp, ent);
17756         if (err) {
17757                 dev_err(&pdev->dev,
17758                         "Problem fetching invariants of chip, aborting\n");
17759                 goto err_out_apeunmap;
17760         }
17761
17762         /* The EPB bridge inside 5714, 5715, and 5780 and any
17763          * device behind the EPB cannot support DMA addresses > 40-bit.
17764          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17765          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17766          * do DMA address check in tg3_start_xmit().
17767          */
17768         if (tg3_flag(tp, IS_5788))
17769                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17770         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17771                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17772 #ifdef CONFIG_HIGHMEM
17773                 dma_mask = DMA_BIT_MASK(64);
17774 #endif
17775         } else
17776                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17777
17778         /* Configure DMA attributes. */
17779         if (dma_mask > DMA_BIT_MASK(32)) {
17780                 err = pci_set_dma_mask(pdev, dma_mask);
17781                 if (!err) {
17782                         features |= NETIF_F_HIGHDMA;
17783                         err = pci_set_consistent_dma_mask(pdev,
17784                                                           persist_dma_mask);
17785                         if (err < 0) {
17786                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17787                                         "DMA for consistent allocations\n");
17788                                 goto err_out_apeunmap;
17789                         }
17790                 }
17791         }
17792         if (err || dma_mask == DMA_BIT_MASK(32)) {
17793                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17794                 if (err) {
17795                         dev_err(&pdev->dev,
17796                                 "No usable DMA configuration, aborting\n");
17797                         goto err_out_apeunmap;
17798                 }
17799         }
17800
17801         tg3_init_bufmgr_config(tp);
17802
17803         /* 5700 B0 chips do not support checksumming correctly due
17804          * to hardware bugs.
17805          */
17806         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17807                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17808
17809                 if (tg3_flag(tp, 5755_PLUS))
17810                         features |= NETIF_F_IPV6_CSUM;
17811         }
17812
17813         /* TSO is on by default on chips that support hardware TSO.
17814          * Firmware TSO on older chips gives lower performance, so it
17815          * is off by default, but can be enabled using ethtool.
17816          */
17817         if ((tg3_flag(tp, HW_TSO_1) ||
17818              tg3_flag(tp, HW_TSO_2) ||
17819              tg3_flag(tp, HW_TSO_3)) &&
17820             (features & NETIF_F_IP_CSUM))
17821                 features |= NETIF_F_TSO;
17822         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17823                 if (features & NETIF_F_IPV6_CSUM)
17824                         features |= NETIF_F_TSO6;
17825                 if (tg3_flag(tp, HW_TSO_3) ||
17826                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17827                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17828                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17829                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17830                     tg3_asic_rev(tp) == ASIC_REV_57780)
17831                         features |= NETIF_F_TSO_ECN;
17832         }
17833
17834         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17835                          NETIF_F_HW_VLAN_CTAG_RX;
17836         dev->vlan_features |= features;
17837
17838         /*
17839          * Add loopback capability only for a subset of devices that support
17840          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17841          * loopback for the remaining devices.
17842          */
17843         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17844             !tg3_flag(tp, CPMU_PRESENT))
17845                 /* Add the loopback capability */
17846                 features |= NETIF_F_LOOPBACK;
17847
17848         dev->hw_features |= features;
17849         dev->priv_flags |= IFF_UNICAST_FLT;
17850
17851         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17852         dev->min_mtu = TG3_MIN_MTU;
17853         dev->max_mtu = TG3_MAX_MTU(tp);
17854
17855         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17856             !tg3_flag(tp, TSO_CAPABLE) &&
17857             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17858                 tg3_flag_set(tp, MAX_RXPEND_64);
17859                 tp->rx_pending = 63;
17860         }
17861
17862         err = tg3_get_device_address(tp);
17863         if (err) {
17864                 dev_err(&pdev->dev,
17865                         "Could not obtain valid ethernet address, aborting\n");
17866                 goto err_out_apeunmap;
17867         }
17868
        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->tp = tp;
                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

                tnapi->int_mbox = intmbx;
                if (i <= 4)
                        intmbx += 0x8;
                else
                        intmbx += 0x4;

                tnapi->consmbox = rcvmbx;
                tnapi->prodmbox = sndmbx;

                if (i)
                        tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;

                if (!tg3_flag(tp, SUPPORT_MSIX))
                        break;

                /*
                 * If we support MSI-X we will be using RSS.  With RSS, the
                 * first vector only handles link interrupts and the
                 * remaining vectors handle rx and tx interrupts.  Reuse the
                 * mailbox values for the next iteration.  The values we set
                 * up above are still useful for single-vector mode.
                 */
                if (!i)
                        continue;

                rcvmbx += 0x8;

                if (sndmbx & 0x4)
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }
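
        /*
         * Illustrative result of the loop above (a sketch of the pattern,
         * not authoritative register offsets): because vector 0 only fields
         * link interrupts under RSS, vector 1 reuses ring 0's mailboxes:
         *
         *   vector 0: int_mbox = INTERRUPT_0,      cons/prod = ring 0
         *   vector 1: int_mbox = INTERRUPT_0 + 8,  cons/prod = ring 0
         *   vector 2: int_mbox = INTERRUPT_0 + 16, cons/prod = ring 1
         *
         * Rx return mailboxes advance by 8 bytes per ring, while the tx
         * producer mailboxes alternate between the two 32-bit halves of
         * each 64-bit register pair (the +0xc/-0x4 arithmetic above).
         */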

        /*
         * Reset the chip in case the UNDI or EFI boot driver did not shut
         * it down.  The DMA self test below will enable the WDMAC, and we
         * would otherwise see (spurious) pending DMA on the PCI bus at
         * that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tg3_full_lock(tp, 0);
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

        if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720 ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, PTP_CAPABLE);

        tg3_timer_init(tp);

        tg3_carrier_off(tp);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

        if (tg3_flag(tp, PTP_CAPABLE)) {
                tg3_ptp_init(tp);
                tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
                                                   &tp->pdev->dev);
                if (IS_ERR(tp->ptp_clock))
                        tp->ptp_clock = NULL;
        }
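
        /*
         * A failed ptp_clock_register() is tolerated above: NULLing
         * tp->ptp_clock leaves the device usable without exposing a PTP
         * clock rather than failing the whole probe.
         */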

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        pci_save_state(pdev);

        return 0;

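/*
 * Error unwind for tg3_init_one(): the labels below release resources in
 * the reverse order of their acquisition, so a failure at any step jumps
 * to the label that frees everything acquired up to that point.
 */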
err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
        return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                tg3_ptp_fini(tp);

                release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
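        /*
         * If preparing for power-down failed, do not leave the device half
         * torn down: bring the hardware back up, reattach the netdev and
         * restart the timers so the interface keeps working even though
         * the suspend attempt is abandoned.
         */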
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, true);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

unlock:
        rtnl_unlock();
        return err;
}

static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        tg3_flag_set(tp, INIT_COMPLETE);
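        /*
         * The second argument of tg3_restart_hw() requests a PHY reset;
         * skip it when the link was deliberately kept up across the
         * power-down (TG3_PHYFLG_KEEP_LINK_ON_PWRDN).
         */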
        err = tg3_restart_hw(tp,
                             !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

unlock:
        rtnl_unlock();
        return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);

        rtnl_lock();
        netif_device_detach(dev);

        if (netif_running(dev))
                dev_close(dev);

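        /*
         * Fully power the chip down only on a genuine power-off; on a
         * reboot the device is left powered, presumably so that boot
         * firmware can still reach it.
         */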
        if (system_state == SYSTEM_POWER_OFF)
                tg3_power_down(tp);

        rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        /* The netdev may not be registered or running yet */
        if (!netdev || !netif_running(netdev))
                goto done;

        /* Flag recovery only for a frozen channel; there is nothing to
         * recover from a permanent failure.
         */
        if (state == pci_channel_io_frozen)
                tp->pcierr_recovery = true;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Want to make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
        if (state == pci_channel_io_perm_failure) {
                if (netdev) {
                        tg3_napi_enable(tp);
                        dev_close(netdev);
                }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
        }

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

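        /*
         * Re-enable bus mastering and restore the config space captured by
         * pci_save_state() at probe time; saving it again immediately means
         * a later restore starts from this freshly re-established state.
         */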
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netdev || !netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        tg3_phy_start(tp);

done:
        tp->pcierr_recovery = false;
        rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
        .shutdown       = tg3_shutdown,
};

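/*
 * module_pci_driver() expands to the standard module_init()/module_exit()
 * boilerplate that registers and unregisters tg3_driver on module load
 * and unload.
 */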
module_pci_driver(tg3_driver);