/* Source: linux.git — drivers/net/ethernet/broadcom/tg3.c
 * (merged via git://git.kernel.org/pub/scm/linux/kernel/git/davem/net)
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  *
10  * Firmware is:
11  *      Derived from proprietary unpublished source code,
12  *      Copyright (C) 2000-2016 Broadcom Corporation.
13  *      Copyright (C) 2016-2017 Broadcom Ltd.
14  *
15  *      Permission is hereby granted for the distribution of this firmware
16  *      data in hexadecimal or equivalent format, provided this copyright
17  *      notice is accompanying it.
18  */
19
20
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/stringify.h>
24 #include <linux/kernel.h>
25 #include <linux/sched/signal.h>
26 #include <linux/types.h>
27 #include <linux/compiler.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/in.h>
31 #include <linux/interrupt.h>
32 #include <linux/ioport.h>
33 #include <linux/pci.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/ethtool.h>
38 #include <linux/mdio.h>
39 #include <linux/mii.h>
40 #include <linux/phy.h>
41 #include <linux/brcmphy.h>
42 #include <linux/if.h>
43 #include <linux/if_vlan.h>
44 #include <linux/ip.h>
45 #include <linux/tcp.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/dma-mapping.h>
49 #include <linux/firmware.h>
50 #include <linux/ssb/ssb_driver_gige.h>
51 #include <linux/hwmon.h>
52 #include <linux/hwmon-sysfs.h>
53
54 #include <net/checksum.h>
55 #include <net/ip.h>
56
57 #include <linux/io.h>
58 #include <asm/byteorder.h>
59 #include <linux/uaccess.h>
60
61 #include <uapi/linux/net_tstamp.h>
62 #include <linux/ptp_clock_kernel.h>
63
64 #ifdef CONFIG_SPARC
65 #include <asm/idprom.h>
66 #include <asm/prom.h>
67 #endif
68
69 #define BAR_0   0
70 #define BAR_2   2
71
72 #include "tg3.h"
73
74 /* Functions & macros to verify TG3_FLAGS types */
75
76 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
77 {
78         return test_bit(flag, bits);
79 }
80
/* Backing helper for tg3_flag_set(): set @flag in the @bits bitmap. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
85
/* Backing helper for tg3_flag_clear(): clear @flag in the @bits bitmap. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
90
/* Convenience wrappers: expand a short flag name to the full
 * TG3_FLAG_<name> enumerator and operate on tp->tg3_flags through the
 * type-checked helpers above.
 */
#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
97
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

/* Reset context identifiers (shutdown / init / suspend). */
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif message-level bitmap used when tg3_debug is -1. */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* Delay (usec) after toggling the GRC local-control power switch. */
#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* Ring size is a power of two, so advancing wraps with a mask. */
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

/* Firmware blob paths passed to request_firmware() (callers not in
 * this chunk).
 */
#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
224
225 static char version[] =
226         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
227
228 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
229 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
230 MODULE_LICENSE("GPL");
231 MODULE_VERSION(DRV_MODULE_VERSION);
232 MODULE_FIRMWARE(FIRMWARE_TG3);
233 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
234 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
235
236 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
237 module_param(tg3_debug, int, 0);
238 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
239
/* Per-device capability hints carried in pci_device_id.driver_data. */
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

/* PCI IDs this driver binds to; the empty {} entry terminates the
 * table.  PCI_DEVICE_SUB entries match specific subsystem IDs and must
 * precede the generic entry for the same device ID.
 */
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
363
/* Statistic names reported via ethtool -S.  The order is significant:
 * it must line up entry-for-entry with the stats structure the driver
 * copies out (declared elsewhere in tg3.h — do not reorder here alone).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
446
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
/* Indices into ethtool_test_keys[] and the ethtool self-test result
 * array; keep in sync with the designated initializers below.
 */
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
456
457
/* Self-test names for ethtool, indexed by the TG3_*_TEST constants
 * above.  "(online)" tests run without disrupting traffic; "(offline)"
 * tests take the interface down.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
472
473
/* Plain MMIO register write through the mapped BAR (posted; no
 * read-back flush).
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
478
/* Plain MMIO register read through the mapped BAR. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
483
/* MMIO write into the APE register region (tp->aperegs mapping). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
488
/* MMIO read from the APE register region (tp->aperegs mapping). */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
493
/* Write a chip register indirectly through the PCI config-space
 * window (TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA
 * carries the value).  indirect_lock serializes use of the shared
 * two-register window against other indirect accessors.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
503
/* MMIO register write followed by a read-back of the same register to
 * flush the posted write to the device.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
509
/* Read a chip register indirectly through the PCI config-space window;
 * counterpart of tg3_write_indirect_reg32(), under the same lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
521
/* Write a mailbox register in indirect mode.  Two mailboxes have
 * dedicated config-space aliases and are written directly; all others
 * go through the shared REG_BASE_ADDR/REG_DATA window (mailbox space
 * starts at offset 0x5600) under indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
551
/* Read a mailbox register in indirect mode through the config-space
 * window (mailbox space starts at offset 0x5600), under indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
563
564 /* usec_wait specifies the wait time in usec when writing to certain registers
565  * where it is unsafe to read back the register without some delay.
566  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
567  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
568  */
/* Register write with an optional post-write delay (see the comment
 * above).  Chips needing the PCIX/ICH workarounds use the non-posted
 * tp->write32 path; otherwise write via MMIO and read back to flush.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
587
/* Mailbox write with a conditional read-back: always read back when
 * FLUSH_POSTED_WRITES is set, and also in the common case unless one
 * of the reorder/ICH workaround flags forbids it.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
596
/* TX mailbox write.  Chips with the TXD_MBOX_HWBUG flag need the value
 * written twice; the read-back flushes the posted write when the
 * reorder/flush flags require it.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
607
/* 5906 mailbox read: mailboxes sit at GRCMBOX_BASE on this chip. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
612
/* 5906 mailbox write: mailboxes sit at GRCMBOX_BASE on this chip. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
617
/* Register/mailbox access shorthands.  These dispatch through the
 * per-chip function pointers in struct tg3; the _f variants flush the
 * posted write, and tw32_wait_f additionally delays (usec) around it.
 * All expect a local `tp` in scope.
 */
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
628
/* Write @val to NIC on-board SRAM at @off through the memory window,
 * either via config space (SRAM_USE_CONFIG) or MMIO with flushes.
 * On 5906 the stats-block SRAM range is silently skipped (writes to it
 * are dropped).  The window base is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
653
/* Read NIC on-board SRAM at @off into *@val through the memory window;
 * counterpart of tg3_write_mem().  On 5906 the stats-block range reads
 * back as zero.  The window base is always restored to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
680
/* Release any APE lock-grant bits this driver instance might have
 * inherited (e.g. from a crashed previous driver).  PHY locks always
 * use the DRIVER grant bit; other locks use a per-PCI-function bit,
 * except function 0 which also uses the DRIVER bit.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	/* 5761 has one shared lock-grant register block; later chips
	 * have per-lock grant registers.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		/* Writing the grant bit releases the lock. */
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}
710
/* Acquire APE mutex @locknum on chips with an Application Processing
 * Engine.  Returns 0 on success (or when no arbitration is needed),
 * -EBUSY if the lock cannot be obtained within ~1 ms, and -EINVAL for
 * an unknown lock number.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 does not arbitrate the GPIO lock. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Function 0 uses the DRIVER request bit; other PCI
		 * functions request with their own per-function bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 has one shared request/grant register pair; later chips
	 * have per-lock registers.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		/* Give up early if the device has dropped off the bus. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
772
773 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
774 {
775         u32 gnt, bit;
776
777         if (!tg3_flag(tp, ENABLE_APE))
778                 return;
779
780         switch (locknum) {
781         case TG3_APE_LOCK_GPIO:
782                 if (tg3_asic_rev(tp) == ASIC_REV_5761)
783                         return;
784         case TG3_APE_LOCK_GRC:
785         case TG3_APE_LOCK_MEM:
786                 if (!tp->pci_fn)
787                         bit = APE_LOCK_GRANT_DRIVER;
788                 else
789                         bit = 1 << tp->pci_fn;
790                 break;
791         case TG3_APE_LOCK_PHY0:
792         case TG3_APE_LOCK_PHY1:
793         case TG3_APE_LOCK_PHY2:
794         case TG3_APE_LOCK_PHY3:
795                 bit = APE_LOCK_GRANT_DRIVER;
796                 break;
797         default:
798                 return;
799         }
800
801         if (tg3_asic_rev(tp) == ASIC_REV_5761)
802                 gnt = TG3_APE_LOCK_GRANT;
803         else
804                 gnt = TG3_APE_PER_LOCK_GRANT;
805
806         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
807 }
808
809 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
810 {
811         u32 apedata;
812
813         while (timeout_us) {
814                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
815                         return -EBUSY;
816
817                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
818                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
819                         break;
820
821                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
822
823                 udelay(10);
824                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
825         }
826
827         return timeout_us ? 0 : -EBUSY;
828 }
829
830 #ifdef CONFIG_TIGON3_HWMON
831 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
832 {
833         u32 i, apedata;
834
835         for (i = 0; i < timeout_us / 10; i++) {
836                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
837
838                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
839                         break;
840
841                 udelay(10);
842         }
843
844         return i == timeout_us / 10;
845 }
846
/* Read @len bytes (copied as u32 words) starting at @base_off of the APE
 * NCSI scratchpad into @data, one message-buffer-sized chunk at a time.
 * For each chunk, a scratchpad-read event is posted to the APE firmware
 * and the result is copied out of the shared message buffer.
 *
 * Returns 0 on success (or trivially when NCSI is absent), -ENODEV if
 * the APE shared-memory signature is missing, -EAGAIN/-EBUSY on firmware
 * not-ready or handshake timeouts.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	/* Shared memory must carry a valid firmware signature. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Message buffer layout: two control words (offset, length)
	 * followed by the data area at msgoff.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		/* Re-check firmware readiness before every chunk. */
		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* tg3_ape_event_lock() succeeded, so we now hold
		 * TG3_APE_LOCK_MEM; it is released after the control
		 * words are written below.
		 */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		/* Give the APE up to 30 ms to complete the chunk. */
		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the chunk out of the message buffer. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
910 #endif
911
912 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
913 {
914         int err;
915         u32 apedata;
916
917         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
918         if (apedata != APE_SEG_SIG_MAGIC)
919                 return -EAGAIN;
920
921         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
922         if (!(apedata & APE_FW_STATUS_READY))
923                 return -EAGAIN;
924
925         /* Wait for up to 20 millisecond for APE to service previous event. */
926         err = tg3_ape_event_lock(tp, 20000);
927         if (err)
928                 return err;
929
930         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
931                         event | APE_EVENT_STATUS_EVENT_PENDING);
932
933         tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
934         tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
935
936         return 0;
937 }
938
/* Tell the APE firmware that the driver's state changed (@kind is a
 * RESET_KIND_* code).  Publishes the new state in the host segment of
 * APE shared memory, then posts a state-change event.  No-op when the
 * APE is not enabled or @kind is not handled here.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host signature, driver version, init count
		 * and behavior flags, and mark the driver as started.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* If Wake-on-LAN is armed, advertise the WoL state (and
		 * auto speed) instead of a plain unload.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
986
987 static void tg3_send_ape_heartbeat(struct tg3 *tp,
988                                    unsigned long interval)
989 {
990         /* Check if hb interval has exceeded */
991         if (!tg3_flag(tp, ENABLE_APE) ||
992             time_before(jiffies, tp->ape_hb_jiffies + interval))
993                 return;
994
995         tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
996         tp->ape_hb_jiffies = jiffies;
997 }
998
999 static void tg3_disable_ints(struct tg3 *tp)
1000 {
1001         int i;
1002
1003         tw32(TG3PCI_MISC_HOST_CTRL,
1004              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1005         for (i = 0; i < tp->irq_max; i++)
1006                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1007 }
1008
/* Unmask PCI interrupts and re-arm the interrupt mailbox of every
 * active NAPI vector, then kick the coalescing engine (or force an
 * interrupt) so that a status-block update that arrived while
 * interrupts were disabled is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	/* Order the irq_sync clear before the register writes below. */
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* 1-shot MSI mode writes the mailbox a second time —
		 * NOTE(review): presumably a chip requirement; confirm
		 * against the 1SHOT_MSI errata before changing.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Vectors 0 and 1 are re-armed individually elsewhere. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1039
1040 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1041 {
1042         struct tg3 *tp = tnapi->tp;
1043         struct tg3_hw_status *sblk = tnapi->hw_status;
1044         unsigned int work_exists = 0;
1045
1046         /* check for phy events */
1047         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1048                 if (sblk->status & SD_STATUS_LINK_CHG)
1049                         work_exists = 1;
1050         }
1051
1052         /* check for TX work to do */
1053         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1054                 work_exists = 1;
1055
1056         /* check for RX work to do */
1057         if (tnapi->rx_rcb_prod_idx &&
1058             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1059                 work_exists = 1;
1060
1061         return work_exists;
1062 }
1063
1064 /* tg3_int_reenable
1065  *  similar to tg3_enable_ints, but it accurately determines whether there
1066  *  is new work pending and can return without flushing the PIO write
1067  *  which reenables interrupts
1068  */
1069 static void tg3_int_reenable(struct tg3_napi *tnapi)
1070 {
1071         struct tg3 *tp = tnapi->tp;
1072
1073         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1074         mmiowb();
1075
1076         /* When doing tagged status, this work check is unnecessary.
1077          * The last_tag we write above tells the chip which piece of
1078          * work we've completed.
1079          */
1080         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1081                 tw32(HOSTCC_MODE, tp->coalesce_mode |
1082                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
1083 }
1084
/* Program TG3PCI_CLOCK_CTRL to move the device onto its base core
 * clock.  No-op on CPMU-equipped and 5780-class chips, which manage
 * clocking differently.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN control bits and the low field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition via ALTCLK — NOTE(review): the
		 * intermediate write appears to be a hardware sequencing
		 * requirement; confirm before collapsing into one write.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1117
1118 #define PHY_BUSY_LOOPS  5000
1119
/* Read 16-bit PHY register @reg of the PHY at @phy_addr through the
 * MAC's MII management interface, storing the result in *@val (zeroed on
 * failure).  Serialized against APE firmware access via the PHY APE
 * lock.  Returns 0 on success, -EBUSY if the MI transaction did not
 * complete within PHY_BUSY_LOOPS polls.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with a manual MI transaction; turn it
	 * off for the duration (restored before returning).
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the BUSY bit to clear, then re-read to latch data. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1173
/* Read @reg of the default PHY (tp->phy_addr); see __tg3_readphy(). */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1178
/* Write @val to 16-bit PHY register @reg of the PHY at @phy_addr through
 * the MAC's MII management interface.  Serialized against APE firmware
 * access via the PHY APE lock.  Returns 0 on success, -EBUSY if the MI
 * transaction did not complete within PHY_BUSY_LOOPS polls.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Skip registers FET PHYs don't use; treated as a successful
	 * no-op — presumably these registers are absent on FET parts.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling would race with a manual MI transaction; turn it
	 * off for the duration (restored before returning).
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command frame: address, register, data, write op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the BUSY bit to clear. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1232
/* Write @val to @reg of the default PHY; see __tg3_writephy(). */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1237
1238 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1239 {
1240         int err;
1241
1242         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1243         if (err)
1244                 goto done;
1245
1246         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1247         if (err)
1248                 goto done;
1249
1250         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1251                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1252         if (err)
1253                 goto done;
1254
1255         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1256
1257 done:
1258         return err;
1259 }
1260
1261 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1262 {
1263         int err;
1264
1265         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1266         if (err)
1267                 goto done;
1268
1269         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1270         if (err)
1271                 goto done;
1272
1273         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1274                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1275         if (err)
1276                 goto done;
1277
1278         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1279
1280 done:
1281         return err;
1282 }
1283
1284 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1285 {
1286         int err;
1287
1288         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1289         if (!err)
1290                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1291
1292         return err;
1293 }
1294
1295 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1296 {
1297         int err;
1298
1299         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1300         if (!err)
1301                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1302
1303         return err;
1304 }
1305
1306 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1307 {
1308         int err;
1309
1310         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1311                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1312                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1313         if (!err)
1314                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1315
1316         return err;
1317 }
1318
1319 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1320 {
1321         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1322                 set |= MII_TG3_AUXCTL_MISC_WREN;
1323
1324         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1325 }
1326
1327 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1328 {
1329         u32 val;
1330         int err;
1331
1332         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1333
1334         if (err)
1335                 return err;
1336
1337         if (enable)
1338                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1339         else
1340                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1341
1342         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1343                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1344
1345         return err;
1346 }
1347
1348 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1349 {
1350         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1351                             reg | val | MII_TG3_MISC_SHDW_WREN);
1352 }
1353
1354 static int tg3_bmcr_reset(struct tg3 *tp)
1355 {
1356         u32 phy_control;
1357         int limit, err;
1358
1359         /* OK, reset it, and poll the BMCR_RESET bit until it
1360          * clears or we time out.
1361          */
1362         phy_control = BMCR_RESET;
1363         err = tg3_writephy(tp, MII_BMCR, phy_control);
1364         if (err != 0)
1365                 return -EBUSY;
1366
1367         limit = 5000;
1368         while (limit--) {
1369                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1370                 if (err != 0)
1371                         return -EBUSY;
1372
1373                 if ((phy_control & BMCR_RESET) == 0) {
1374                         udelay(40);
1375                         break;
1376                 }
1377                 udelay(10);
1378         }
1379         if (limit < 0)
1380                 return -EBUSY;
1381
1382         return 0;
1383 }
1384
1385 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1386 {
1387         struct tg3 *tp = bp->priv;
1388         u32 val;
1389
1390         spin_lock_bh(&tp->lock);
1391
1392         if (__tg3_readphy(tp, mii_id, reg, &val))
1393                 val = -EIO;
1394
1395         spin_unlock_bh(&tp->lock);
1396
1397         return val;
1398 }
1399
1400 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1401 {
1402         struct tg3 *tp = bp->priv;
1403         u32 ret = 0;
1404
1405         spin_lock_bh(&tp->lock);
1406
1407         if (__tg3_writephy(tp, mii_id, reg, val))
1408                 ret = -EIO;
1409
1410         spin_unlock_bh(&tp->lock);
1411
1412         return ret;
1413 }
1414
/* Configure the 5785 MAC-side PHY interface (LED modes and, for RGMII
 * PHYs, in-band signalling and TX/RX mode bits) to match the attached
 * PHY.  Unrecognized PHYs are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII PHYs only need LED modes and clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: enable in-band status unless explicitly disabled. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	/* External in-band RX/TX handling, per board flags. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the same external in-band choices into the extended
	 * RGMII mode register.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1495
1496 static void tg3_mdio_start(struct tg3 *tp)
1497 {
1498         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1499         tw32_f(MAC_MI_MODE, tp->mi_mode);
1500         udelay(80);
1501
1502         if (tg3_flag(tp, MDIOBUS_INITED) &&
1503             tg3_asic_rev(tp) == ASIC_REV_5785)
1504                 tg3_mdio_config_5785(tp);
1505 }
1506
/* Determine the PHY address for this device and, when phylib is in use,
 * allocate and register the mdio bus and configure the attached PHY's
 * interface flags.  Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+: PHY address derives from the PCI function
		 * number; serdes PHYs sit 7 addresses higher.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		/* SSB/roboswitch boards publish the PHY address. */
		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	/* Nothing more to do without phylib, or if already registered. */
	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	/* Scan only our own PHY address. */
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY interface mode and Broadcom PHY driver flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1613
1614 static void tg3_mdio_fini(struct tg3 *tp)
1615 {
1616         if (tg3_flag(tp, MDIOBUS_INITED)) {
1617                 tg3_flag_clear(tp, MDIOBUS_INITED);
1618                 mdiobus_unregister(tp->mdio_bus);
1619                 mdiobus_free(tp->mdio_bus);
1620         }
1621 }
1622
1623 /* tp->lock is held. */
1624 static inline void tg3_generate_fw_event(struct tg3 *tp)
1625 {
1626         u32 val;
1627
1628         val = tr32(GRC_RX_CPU_EVENT);
1629         val |= GRC_RX_CPU_DRIVER_EVENT;
1630         tw32_f(GRC_RX_CPU_EVENT, val);
1631
1632         tp->last_event_jiffies = jiffies;
1633 }
1634
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
/* Poll until the firmware acknowledges the last event raised by
 * tg3_generate_fw_event(), bounded by TG3_FW_EVENT_TIMEOUT_USEC measured
 * from tp->last_event_jiffies.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8 usec steps; the +1 guarantees at least one pass. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		/* Firmware clears GRC_RX_CPU_DRIVER_EVENT once it has
		 * consumed the event.
		 */
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		/* Stop polling if the device dropped off the bus. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1666
1667 /* tp->lock is held. */
1668 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1669 {
1670         u32 reg, val;
1671
1672         val = 0;
1673         if (!tg3_readphy(tp, MII_BMCR, &reg))
1674                 val = reg << 16;
1675         if (!tg3_readphy(tp, MII_BMSR, &reg))
1676                 val |= (reg & 0xffff);
1677         *data++ = val;
1678
1679         val = 0;
1680         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1681                 val = reg << 16;
1682         if (!tg3_readphy(tp, MII_LPA, &reg))
1683                 val |= (reg & 0xffff);
1684         *data++ = val;
1685
1686         val = 0;
1687         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1688                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1689                         val = reg << 16;
1690                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1691                         val |= (reg & 0xffff);
1692         }
1693         *data++ = val;
1694
1695         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1696                 val = reg << 16;
1697         else
1698                 val = 0;
1699         *data++ = val;
1700 }
1701
/* tp->lock is held. */
/* Report the current PHY/link state to the ASF management firmware on
 * 5780-class devices: wait for the previous event to be ACKed, write a
 * LINK_UPDATE command plus four words of PHY state into the firmware
 * command mailbox, then raise a driver event to notify the firmware.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	/* NOTE(review): length is 14 although 16 bytes of data follow —
	 * presumably what this firmware command expects; confirm before
	 * changing.
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
1723
/* tp->lock is held. */
/* Pause the ASF firmware before a chip reset.  Only done when ASF is
 * enabled without APE; the full event handshake (wait, command, event,
 * wait) ensures the firmware has quiesced before the caller proceeds.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1739
1740 /* tp->lock is held. */
1741 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1742 {
1743         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1744                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1745
1746         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1747                 switch (kind) {
1748                 case RESET_KIND_INIT:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_START);
1751                         break;
1752
1753                 case RESET_KIND_SHUTDOWN:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_UNLOAD);
1756                         break;
1757
1758                 case RESET_KIND_SUSPEND:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_SUSPEND);
1761                         break;
1762
1763                 default:
1764                         break;
1765                 }
1766         }
1767 }
1768
1769 /* tp->lock is held. */
1770 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1771 {
1772         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1773                 switch (kind) {
1774                 case RESET_KIND_INIT:
1775                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1776                                       DRV_STATE_START_DONE);
1777                         break;
1778
1779                 case RESET_KIND_SHUTDOWN:
1780                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1781                                       DRV_STATE_UNLOAD_DONE);
1782                         break;
1783
1784                 default:
1785                         break;
1786                 }
1787         }
1788 }
1789
1790 /* tp->lock is held. */
1791 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1792 {
1793         if (tg3_flag(tp, ENABLE_ASF)) {
1794                 switch (kind) {
1795                 case RESET_KIND_INIT:
1796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797                                       DRV_STATE_START);
1798                         break;
1799
1800                 case RESET_KIND_SHUTDOWN:
1801                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1802                                       DRV_STATE_UNLOAD);
1803                         break;
1804
1805                 case RESET_KIND_SUSPEND:
1806                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1807                                       DRV_STATE_SUSPEND);
1808                         break;
1809
1810                 default:
1811                         break;
1812                 }
1813         }
1814 }
1815
/* Wait for the on-chip firmware to finish initializing after a reset.
 * Returns 0 on success or when no firmware is expected, -ENODEV when
 * the 5906 VCPU never reports init-done or the device goes offline.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	/* Already determined there is no firmware; nothing to wait for. */
	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The firmware
	 * signals completion by writing the one's complement of the
	 * magic value into the mailbox.  Up to ~1s (100000 * 10us).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1879
1880 static void tg3_link_report(struct tg3 *tp)
1881 {
1882         if (!netif_carrier_ok(tp->dev)) {
1883                 netif_info(tp, link, tp->dev, "Link is down\n");
1884                 tg3_ump_link_report(tp);
1885         } else if (netif_msg_link(tp)) {
1886                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1887                             (tp->link_config.active_speed == SPEED_1000 ?
1888                              1000 :
1889                              (tp->link_config.active_speed == SPEED_100 ?
1890                               100 : 10)),
1891                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1892                              "full" : "half"));
1893
1894                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1895                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1896                             "on" : "off",
1897                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1898                             "on" : "off");
1899
1900                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1901                         netdev_info(tp->dev, "EEE is %s\n",
1902                                     tp->setlpicnt ? "enabled" : "disabled");
1903
1904                 tg3_ump_link_report(tp);
1905         }
1906
1907         tp->link_up = netif_carrier_ok(tp->dev);
1908 }
1909
1910 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1911 {
1912         u32 flowctrl = 0;
1913
1914         if (adv & ADVERTISE_PAUSE_CAP) {
1915                 flowctrl |= FLOW_CTRL_RX;
1916                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1917                         flowctrl |= FLOW_CTRL_TX;
1918         } else if (adv & ADVERTISE_PAUSE_ASYM)
1919                 flowctrl |= FLOW_CTRL_TX;
1920
1921         return flowctrl;
1922 }
1923
1924 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1925 {
1926         u16 miireg;
1927
1928         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1929                 miireg = ADVERTISE_1000XPAUSE;
1930         else if (flow_ctrl & FLOW_CTRL_TX)
1931                 miireg = ADVERTISE_1000XPSE_ASYM;
1932         else if (flow_ctrl & FLOW_CTRL_RX)
1933                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1934         else
1935                 miireg = 0;
1936
1937         return miireg;
1938 }
1939
1940 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1941 {
1942         u32 flowctrl = 0;
1943
1944         if (adv & ADVERTISE_1000XPAUSE) {
1945                 flowctrl |= FLOW_CTRL_RX;
1946                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1947                         flowctrl |= FLOW_CTRL_TX;
1948         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1949                 flowctrl |= FLOW_CTRL_TX;
1950
1951         return flowctrl;
1952 }
1953
1954 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1955 {
1956         u8 cap = 0;
1957
1958         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1959                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1960         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1961                 if (lcladv & ADVERTISE_1000XPAUSE)
1962                         cap = FLOW_CTRL_RX;
1963                 if (rmtadv & ADVERTISE_1000XPAUSE)
1964                         cap = FLOW_CTRL_TX;
1965         }
1966
1967         return cap;
1968 }
1969
/* Resolve the active flow-control configuration (from autoneg results
 * or the forced settings) and program the MAC RX/TX mode registers,
 * touching hardware only when a mode word actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib the autoneg state lives in the phy_device. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause bits; copper uses
		 * the standard MII resolution helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only write (and flush) the register if the mode changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
2008
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()).  Mirrors the phy_device state into the MAC: port
 * mode, duplex, flow control, MI status polling and TX inter-packet
 * gap, all under tp->lock; reports link changes outside the lock.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	/* Start from the current mode with port-mode/duplex cleared. */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Pick the MAC port mode for the negotiated speed; the
		 * 5785 is special-cased for sub-gigabit speeds.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: gather local/remote pause bits
			 * for flow-control resolution below.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Write the MAC mode only on change; flush and settle. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Gigabit half duplex needs a larger slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Log only when something user-visible actually changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* tg3_link_report() may sleep/log; call it outside the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
2092
/* Connect the MAC to its PHY through phylib: reset the PHY to a known
 * state, attach with tg3_adjust_link() as the link-change handler, and
 * mask the advertised features down to what the MAC supports.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	/* Idempotent: already connected means nothing to do. */
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices fall through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface: undo the connect and bail. */
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	phy_attached_info(phydev);

	return 0;
}
2142
2143 static void tg3_phy_start(struct tg3 *tp)
2144 {
2145         struct phy_device *phydev;
2146
2147         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2148                 return;
2149
2150         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2151
2152         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2153                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2154                 phydev->speed = tp->link_config.speed;
2155                 phydev->duplex = tp->link_config.duplex;
2156                 phydev->autoneg = tp->link_config.autoneg;
2157                 phydev->advertising = tp->link_config.advertising;
2158         }
2159
2160         phy_start(phydev);
2161
2162         phy_start_aneg(phydev);
2163 }
2164
2165 static void tg3_phy_stop(struct tg3 *tp)
2166 {
2167         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2168                 return;
2169
2170         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2171 }
2172
2173 static void tg3_phy_fini(struct tg3 *tp)
2174 {
2175         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2176                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2177                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2178         }
2179 }
2180
2181 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2182 {
2183         int err;
2184         u32 val;
2185
2186         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2187                 return 0;
2188
2189         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2190                 /* Cannot do read-modify-write on 5401 */
2191                 err = tg3_phy_auxctl_write(tp,
2192                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2193                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2194                                            0x4c20);
2195                 goto done;
2196         }
2197
2198         err = tg3_phy_auxctl_read(tp,
2199                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2200         if (err)
2201                 return err;
2202
2203         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2204         err = tg3_phy_auxctl_write(tp,
2205                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2206
2207 done:
2208         return err;
2209 }
2210
/* Toggle auto power-down on FET-style PHYs.  Shadow registers are only
 * reachable while MII_TG3_FET_SHADOW_EN is set in the test register, so
 * the sequence is: enable shadow access, RMW AUXSTAT2, restore the
 * original test-register value.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Always restore the original test register value. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
2230
/* Enable or disable PHY auto power-down (APD).  Not applicable to
 * pre-5705 parts or to 5717-plus MII serdes configurations; FET PHYs
 * use their own shadow-register path.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* 5784 with APD enabled omits the DLL-APD bit. */
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	/* Program the APD wake timer and, when enabling, the enable bit. */
	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
2261
/* Enable or disable automatic MDI/MDI-X crossover.  Not applicable to
 * pre-5705 or serdes configurations.  FET PHYs flip a shadow-register
 * bit (gated by MII_TG3_FET_SHADOW_EN); other PHYs RMW the auxctl
 * misc shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open shadow access, RMW, then restore. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2302
2303 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2304 {
2305         int ret;
2306         u32 val;
2307
2308         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2309                 return;
2310
2311         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2312         if (!ret)
2313                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2314                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2315 }
2316
/* Apply factory calibration values from the chip's OTP word to the PHY
 * DSP registers.  Each field is extracted from tp->phy_otp with its
 * mask/shift pair and written to the matching DSP tap; the whole
 * sequence requires SMDSP access via the auxctl register.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	/* No OTP data recorded for this chip: nothing to apply. */
	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Release SMDSP access. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2353
/* Read the current EEE state out of the PHY (via clause-45 MMD
 * registers) and the CPMU, into @eee if given, else into tp->eee.
 * Bails out silently on any PHY read failure, leaving earlier fields
 * partially filled.
 */
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	/* Any advertised EEE mode implies EEE is enabled. */
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
2393
/* Adjust EEE operation after a link change.  When the link is up at
 * 100/1000 full duplex with autoneg, program the LPI exit timer and
 * arm setlpicnt so the periodic timer can finish LPI entry; otherwise
 * clear the DSP tap and disable LPI in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on the negotiated speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		/* Two timer ticks of settling before LPI is trusted. */
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not active: clear the DSP tap (needs SMDSP access)
		 * and turn LPI off in the CPMU.
		 */
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2433
/* Enable EEE LPI in the CPMU.  On 5717/5719/57765-class chips running
 * at gigabit speed, first program the DSP TAP26 workaround bits (which
 * requires SMDSP access via auxctl).
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2452
2453 static int tg3_wait_macro_done(struct tg3 *tp)
2454 {
2455         int limit = 100;
2456
2457         while (limit--) {
2458                 u32 tmp32;
2459
2460                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2461                         if ((tmp32 & 0x1000) == 0)
2462                                 break;
2463                 }
2464         }
2465         if (limit < 0)
2466                 return -EBUSY;
2467
2468         return 0;
2469 }
2470
/* Write a known test pattern into each of the four PHY DSP channels and
 * read it back to verify the DSP is healthy.  On any failure *resetp is
 * set so the caller retries after another PHY reset; a data miscompare
 * additionally pokes DSP address 0x000b before returning -EBUSY.
 * Returns 0 when all four channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: six words written, read back as three
	 * low/high pairs below.
	 */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start a read-back macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back three low/high pairs and compare against the
		 * pattern, masking to the significant bits of each half.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2536
2537 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2538 {
2539         int chan;
2540
2541         for (chan = 0; chan < 4; chan++) {
2542                 int i;
2543
2544                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2545                              (chan * 0x2000) | 0x0200);
2546                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2547                 for (i = 0; i < 6; i++)
2548                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2549                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2550                 if (tg3_wait_macro_done(tp))
2551                         return -EBUSY;
2552         }
2553
2554         return 0;
2555 }
2556
/* Workaround reset sequence for 5703/5704/5705 PHYs: reset the PHY,
 * force 1000/full master mode, then verify the DSP with a test
 * pattern, retrying (with another BMCR reset) up to 10 times.
 * Afterwards the pattern is cleared and the registers the sequence
 * touched are restored.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_CTRL1000,
                             CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

                err = tg3_phy_toggle_auxctl_smdsp(tp, true);
                if (err)
                        return err;

                /* Block the PHY control access.  */
                tg3_phydsp_write(tp, 0x8005, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        /* NOTE(review): if every iteration bailed out via "continue",
         * phy9_orig below is used uninitialized and a persistent
         * test-pattern failure (err != 0) is silently dropped —
         * confirm whether either can happen on real hardware.
         */
        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        /* Unblock PHY control access and leave the DSP quiescent. */
        tg3_phydsp_write(tp, 0x8005, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

        tg3_phy_toggle_auxctl_smdsp(tp, false);

        /* Restore the master/slave setting saved above. */
        tg3_writephy(tp, MII_CTRL1000, phy9_orig);

        err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
        if (err)
                return err;

        /* Re-enable transmitter and interrupt. */
        reg32 &= ~0x3000;
        tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

        return 0;
}
2624
2625 static void tg3_carrier_off(struct tg3 *tp)
2626 {
2627         netif_carrier_off(tp->dev);
2628         tp->link_up = false;
2629 }
2630
2631 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2632 {
2633         if (tg3_flag(tp, ENABLE_ASF))
2634                 netdev_warn(tp->dev,
2635                             "Management side-band traffic will be interrupted during phy settings change\n");
2636 }
2637
/* Reset the tigon3 PHY and apply all chip- and PHY-specific fixups
 * required afterwards (DSP workarounds, jumbo-frame bits, automdix,
 * wirespeed).  A lost link is reported to the stack first.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 val, cpmuctrl;
        int err;

        /* Take the 5906 internal ePHY out of IDDQ power-down first. */
        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* Read BMSR twice to clear the latched link-status bit. */
        err  = tg3_readphy(tp, MII_BMSR, &val);
        err |= tg3_readphy(tp, MII_BMSR, &val);
        if (err != 0)
                return -EBUSY;

        /* Reset will drop the link; tell the stack now. */
        if (netif_running(tp->dev) && tp->link_up) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        /* 5703/4/5 use the special reset-and-verify sequence. */
        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_asic_rev(tp) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        /* Temporarily clear the 10Mb-RX-only CPMU mode on non-AX 5784
         * around the reset; restored (with a DSP fixup) below.
         */
        cpmuctrl = 0;
        if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
            tg3_chip_rev(tp) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        /* Undo any forced 12.5MHz 1000Mb MAC clock on 5784/5761 AX. */
        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
            tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

        /* None of the remaining fixups apply to 5717+ MII serdes. */
        if (tg3_flag(tp, 5717_PLUS) &&
            (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
                return 0;

        tg3_phy_apply_otp(tp);

        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);

out:
        /* DSP fixup for PHYs with the ADC bug. */
        if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
            !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                tg3_phydsp_write(tp, 0x201f, 0x2aaa);
                tg3_phydsp_write(tp, 0x000a, 0x0323);
                tg3_phy_toggle_auxctl_smdsp(tp, false);
        }

        /* 5704 A0 workaround: the shadow write is issued twice. */
        if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        }

        /* DSP fixups for the bit-error-rate and jitter bugs. */
        if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
                if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, 0x000a, 0x310b);
                        tg3_phydsp_write(tp, 0x201f, 0x9506);
                        tg3_phydsp_write(tp, 0x401f, 0x14e2);
                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
                if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                        if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                                tg3_writephy(tp, MII_TG3_TEST1,
                                             MII_TG3_TEST1_TRIM_EN | 0x4);
                        } else
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        }

        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
        } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
                /* Set bit 14 with read-modify-write to preserve other bits */
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
                if (!err)
                        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                           val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tg3_flag(tp, JUMBO_CAPABLE)) {
                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
                tg3_phydsp_write(tp, 0xffb, 0x4000);

        tg3_phy_toggle_automdix(tp, true);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
2781
/* GPIO power-management handshake messages exchanged between the up
 * to four PCI functions of a device through a shared status word
 * (APE GPIO_MSG or CPMU_DRV_STATUS — see tg3_set_function_status()).
 * Each function owns a 4-bit field; the *_ALL_* masks cover one
 * message bit across all four fields.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2797
/* Replace this PCI function's 4-bit field in the shared GPIO
 * power-management status word with @newstat and return the merged
 * status of all functions.  5717/5719 keep the word in the APE
 * GPIO_MSG register; other chips use TG3_CPMU_DRV_STATUS.
 *
 * Returns the updated word shifted down so function 0's field starts
 * at bit 0.
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
        u32 status, shift;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719)
                status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
        else
                status = tr32(TG3_CPMU_DRV_STATUS);

        /* Each function owns 4 bits above TG3_APE_GPIO_MSG_SHIFT. */
        shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
        status &= ~(TG3_GPIO_MSG_MASK << shift);
        status |= (newstat << shift);

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719)
                tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
        else
                tw32(TG3_CPMU_DRV_STATUS, status);

        return status >> TG3_APE_GPIO_MSG_SHIFT;
}
2820
/* Switch the board's power source back to Vmain.  On 5717/5719/5720
 * the GPIOs are shared between functions, so the switch happens under
 * the APE GPIO lock after announcing this driver's presence in the
 * shared status word; other NICs simply rewrite GRC_LOCAL_CTRL.
 * No-op unless the IS_NIC flag is set.
 *
 * Returns 0 on success, -EIO if the APE GPIO lock cannot be taken.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return 0;

        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                        return -EIO;

                tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
        } else {
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        }

        return 0;
}
2845
/* Stay on Vmain and pulse GPIO1 high-low-high, which presumably
 * shuts down the auxiliary supply — confirm against board wiring.
 * Skipped on non-NIC boards and on 5700/5701, whose GPIOs are used
 * differently.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
        u32 grc_local_ctrl;

        if (!tg3_flag(tp, IS_NIC) ||
            tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701)
                return;

        /* Drive GPIO1 as an output for the whole sequence. */
        grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2869
/* Drive the GRC local-control GPIOs to switch the board over to the
 * auxiliary (Vaux) power source.  The required GPIO sequence differs
 * per chip family; see the per-branch comments.  No-op unless the
 * IS_NIC flag is set.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                /* 5700/5701: single write driving GPIO 0 and 1 high. */
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                            (GRC_LCLCTRL_GPIO_OE0 |
                             GRC_LCLCTRL_GPIO_OE1 |
                             GRC_LCLCTRL_GPIO_OE2 |
                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                             GRC_LCLCTRL_GPIO_OUTPUT1),
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1 |
                                     tp->grc_local_ctrl;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else {
                u32 no_gpio2;
                u32 grc_local_ctrl = 0;

                /* Workaround to prevent overdrawing Amps. */
                if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }

                /* On 5753 and variants, GPIO2 cannot be used. */
                no_gpio2 = tp->nic_sram_data_cfg &
                           NIC_SRAM_DATA_CFG_NO_GPIO2;

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                  GRC_LCLCTRL_GPIO_OE1 |
                                  GRC_LCLCTRL_GPIO_OE2 |
                                  GRC_LCLCTRL_GPIO_OUTPUT1 |
                                  GRC_LCLCTRL_GPIO_OUTPUT2;
                if (no_gpio2) {
                        grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                            GRC_LCLCTRL_GPIO_OUTPUT2);
                }
                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                if (!no_gpio2) {
                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL,
                                    tp->grc_local_ctrl | grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }
        }
}
2946
/* 5717-class helper for tg3_frob_aux_power().  Publishes this
 * function's Vaux requirement (ASF, APE or WoL armed) in the shared
 * status word — which also clears its driver-present bit — and, when
 * no other function still has a driver present, performs the actual
 * switch to Vaux or back to Vmain.  Serialized by the APE GPIO lock.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
        u32 msg = 0;

        /* Serialize power state transitions */
        if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                return;

        if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
                msg = TG3_GPIO_MSG_NEED_VAUX;

        msg = tg3_set_function_status(tp, msg);

        /* Another function's driver is still present; defer to it. */
        if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
                goto done;

        if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
                tg3_pwrsrc_switch_to_vaux(tp);
        else
                tg3_pwrsrc_die_with_vmain(tp);

done:
        tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2971
/* Decide whether this device — and its peer function on dual-port
 * boards — still needs auxiliary power (WoL armed when @include_wol,
 * or ASF management firmware) and switch the power source to Vaux or
 * back to Vmain accordingly.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
        bool need_vaux = false;

        /* The GPIOs do something completely different on 57765. */
        if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
                return;

        /* 5717-class chips coordinate through the shared status word. */
        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720) {
                tg3_frob_aux_power_5717(tp, include_wol ?
                                        tg3_flag(tp, WOL_ENABLE) != 0 : 0);
                return;
        }

        if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);

                /* remove_one() may have been run on the peer. */
                if (dev_peer) {
                        struct tg3 *tp_peer = netdev_priv(dev_peer);

                        /* A fully initialized peer manages power itself. */
                        if (tg3_flag(tp_peer, INIT_COMPLETE))
                                return;

                        if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
                            tg3_flag(tp_peer, ENABLE_ASF))
                                need_vaux = true;
                }
        }

        if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
            tg3_flag(tp, ENABLE_ASF))
                need_vaux = true;

        if (need_vaux)
                tg3_pwrsrc_switch_to_vaux(tp);
        else
                tg3_pwrsrc_die_with_vmain(tp);
}
3015
3016 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3017 {
3018         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3019                 return 1;
3020         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3021                 if (speed != SPEED_10)
3022                         return 1;
3023         } else if (speed == SPEED_10)
3024                 return 1;
3025
3026         return 0;
3027 }
3028
3029 static bool tg3_phy_power_bug(struct tg3 *tp)
3030 {
3031         switch (tg3_asic_rev(tp)) {
3032         case ASIC_REV_5700:
3033         case ASIC_REV_5704:
3034                 return true;
3035         case ASIC_REV_5780:
3036                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3037                         return true;
3038                 return false;
3039         case ASIC_REV_5717:
3040                 if (!tp->pci_fn)
3041                         return true;
3042                 return false;
3043         case ASIC_REV_5719:
3044         case ASIC_REV_5720:
3045                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3046                     !tp->pci_fn)
3047                         return true;
3048                 return false;
3049         }
3050
3051         return false;
3052 }
3053
3054 static bool tg3_phy_led_bug(struct tg3 *tp)
3055 {
3056         switch (tg3_asic_rev(tp)) {
3057         case ASIC_REV_5719:
3058         case ASIC_REV_5720:
3059                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3060                     !tp->pci_fn)
3061                         return true;
3062                 return false;
3063         }
3064
3065         return false;
3066 }
3067
/* Power down (or isolate) the PHY ahead of a low-power transition.
 * Serdes, the 5906 ePHY and FET-style PHYs each get their own
 * sequence, and chips flagged by tg3_phy_power_bug() must not have
 * the PHY powered down at all.  No-op when the link must stay up for
 * management traffic (TG3_PHYFLG_KEEP_LINK_ON_PWRDN).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
                return;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                if (tg3_asic_rev(tp) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        /* Hold the serdes in HW-autoneg soft reset. */
                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* Put the internal ePHY into IDDQ power-down. */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 phytest;
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                        u32 phy;

                        /* Stop advertising and restart autoneg. */
                        tg3_writephy(tp, MII_ADVERTISE, 0);
                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);

                        /* Set the standby-power bit via the shadow
                         * register window, then close the window.
                         */
                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     phytest | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
                                phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
                                tg3_writephy(tp,
                                             MII_TG3_FET_SHDW_AUXMODE4,
                                             phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
                }
                return;
        } else if (do_low_power) {
                if (!tg3_phy_led_bug(tp))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

                val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
                      MII_TG3_AUXCTL_PCTL_VREG_11V;
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (tg3_phy_power_bug(tp))
                return;

        /* Drop the 1000Mb MAC clock to 12.5MHz on 5784/5761 AX. */
        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
            tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3141
/* tp->lock is held. */
/* Acquire the NVRAM software-arbitration grant for this port.  The
 * lock is recursive: only the first acquisition touches hardware and
 * tp->nvram_lock_cnt tracks the nesting depth.  Polls up to
 * 8000 * 20us for SWARB_GNT1.  No-op on chips without NVRAM
 * arbitration (NVRAM flag clear).
 *
 * Returns 0 on success, -ENODEV if the grant never arrives.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
        if (tg3_flag(tp, NVRAM)) {
                int i;

                if (tp->nvram_lock_cnt == 0) {
                        tw32(NVRAM_SWARB, SWARB_REQ_SET1);
                        for (i = 0; i < 8000; i++) {
                                if (tr32(NVRAM_SWARB) & SWARB_GNT1)
                                        break;
                                udelay(20);
                        }
                        if (i == 8000) {
                                /* Timed out: withdraw the request. */
                                tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
                                return -ENODEV;
                        }
                }
                tp->nvram_lock_cnt++;
        }
        return 0;
}
3164
3165 /* tp->lock is held. */
3166 static void tg3_nvram_unlock(struct tg3 *tp)
3167 {
3168         if (tg3_flag(tp, NVRAM)) {
3169                 if (tp->nvram_lock_cnt > 0)
3170                         tp->nvram_lock_cnt--;
3171                 if (tp->nvram_lock_cnt == 0)
3172                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3173         }
3174 }
3175
3176 /* tp->lock is held. */
3177 static void tg3_enable_nvram_access(struct tg3 *tp)
3178 {
3179         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3180                 u32 nvaccess = tr32(NVRAM_ACCESS);
3181
3182                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3183         }
3184 }
3185
3186 /* tp->lock is held. */
3187 static void tg3_disable_nvram_access(struct tg3 *tp)
3188 {
3189         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3190                 u32 nvaccess = tr32(NVRAM_ACCESS);
3191
3192                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3193         }
3194 }
3195
/* Read one 32-bit word from the legacy serial EEPROM through the
 * GRC_EEPROM_ADDR/GRC_EEPROM_DATA register interface.
 *
 * @offset: byte offset; must be word-aligned and within
 *          EEPROM_ADDR_ADDR_MASK.
 * @val:    receives the byteswapped word on success.
 *
 * Returns 0, -EINVAL on a bad offset, or -EBUSY if the read does not
 * complete within ~1000 1ms polls.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
                return -EINVAL;

        /* Program the address and start a read, clearing the address,
         * device-id and read bits first.
         */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        tmp = tr32(GRC_EEPROM_DATA);

        /*
         * The data will always be opposite the native endian
         * format.  Perform a blind byteswap to compensate.
         */
        *val = swab32(tmp);

        return 0;
}
3235
/* Max polling iterations (10-40us each) for an NVRAM command. */
#define NVRAM_CMD_TIMEOUT 10000
3237
/* Issue @nvram_cmd to the NVRAM controller and poll for completion,
 * sleeping 10-40us per iteration up to NVRAM_CMD_TIMEOUT tries.
 *
 * Returns 0 once NVRAM_CMD_DONE is observed, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
        int i;

        tw32(NVRAM_CMD, nvram_cmd);
        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
                usleep_range(10, 40);
                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
                        /* Brief settle delay after completion. */
                        udelay(10);
                        break;
                }
        }

        if (i == NVRAM_CMD_TIMEOUT)
                return -EBUSY;

        return 0;
}
3256
3257 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3258 {
3259         if (tg3_flag(tp, NVRAM) &&
3260             tg3_flag(tp, NVRAM_BUFFERED) &&
3261             tg3_flag(tp, FLASH) &&
3262             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3263             (tp->nvram_jedecnum == JEDEC_ATMEL))
3264
3265                 addr = ((addr / tp->nvram_pagesize) <<
3266                         ATMEL_AT45DB0X1B_PAGE_POS) +
3267                        (addr % tp->nvram_pagesize);
3268
3269         return addr;
3270 }
3271
3272 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3273 {
3274         if (tg3_flag(tp, NVRAM) &&
3275             tg3_flag(tp, NVRAM_BUFFERED) &&
3276             tg3_flag(tp, FLASH) &&
3277             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3278             (tp->nvram_jedecnum == JEDEC_ATMEL))
3279
3280                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3281                         tp->nvram_pagesize) +
3282                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3283
3284         return addr;
3285 }
3286
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word of NVRAM at byte @offset into *val.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        /* Parts without an NVRAM controller use the SEEPROM path. */
        if (!tg3_flag(tp, NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        /* Arbitrate with other users of the NVRAM interface. */
        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = tr32(NVRAM_RDDATA);

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
3324
3325 /* Ensures NVRAM data is in bytestream format. */
3326 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3327 {
3328         u32 v;
3329         int res = tg3_nvram_read(tp, offset, &v);
3330         if (!res)
3331                 *val = cpu_to_be32(v);
3332         return res;
3333 }
3334
/* Write @len bytes from @buf to @offset using the legacy SEEPROM
 * interface, one 32-bit word at a time.  Returns 0 on success or
 * -EBUSY if a word write fails to complete within the poll window.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Ack any prior completion before starting a new cycle. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Program device id 0 plus the target address and start
		 * the write.
		 */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1s (1000 x 1ms) for completion. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3383
/* offset and length are dword aligned */
/* Write to unbuffered flash via read-modify-write of whole pages:
 * read the page, merge caller data, erase, then rewrite the page.
 * Returns 0 on success or a negative errno from the page reads.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page image. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the page containing the current offset. */
		phy_addr = offset & ~pagemask;

		/* Read the entire page into tmp in bytestream order. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		/* Next iteration continues at the following page boundary. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 *
		 * NOTE(review): failures of the WREN/erase commands below
		 * break out of the loop with ret still 0, so those errors
		 * are not reported to the caller — confirm intentional.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the page image back one word at a time, flagging
		 * the first and last words of the burst.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Always drop write-enable on the way out, even after an error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3482
/* offset and length are dword aligned */
/* Write to buffered flash / eeprom one 32-bit word per NVRAM command.
 * Returns 0 on success or the first command's negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* buf holds bytestream (big-endian) data; the write-data
		 * register takes it in host order.
		 */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the flash part's physical addressing. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag the first word of each page (or of the transfer) and
		 * the last word of each page or of the whole transfer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Mid-burst address writes are skipped on 57765+ flash
		 * parts — presumably the controller auto-increments;
		 * TODO(review) confirm against the register spec.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* ST parts (except on 5752 / 5755+) get an explicit
		 * write-enable (WREN) command at the start of each burst.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3537
/* offset and length are dword aligned */
/* Top-level NVRAM write entry: handles write-protect GPIO, NVRAM
 * locking/arbitration and GRC write-enable, then dispatches to the
 * eeprom, buffered, or unbuffered writer as appropriate.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Temporarily deassert the write-protect GPIO, if used. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		/* NOTE(review): this early return skips the write-protect
		 * GPIO restore at the bottom — confirm intentional.
		 */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes in GRC mode for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore the original write-protect GPIO state. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
3587
3588 #define RX_CPU_SCRATCH_BASE     0x30000
3589 #define RX_CPU_SCRATCH_SIZE     0x04000
3590 #define TX_CPU_SCRATCH_BASE     0x34000
3591 #define TX_CPU_SCRATCH_SIZE     0x04000
3592
3593 /* tp->lock is held. */
3594 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3595 {
3596         int i;
3597         const int iters = 10000;
3598
3599         for (i = 0; i < iters; i++) {
3600                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3601                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3602                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3603                         break;
3604                 if (pci_channel_offline(tp->pdev))
3605                         return -EBUSY;
3606         }
3607
3608         return (i == iters) ? -EBUSY : 0;
3609 }
3610
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	/* Issue one final halt request unconditionally (even if the poll
	 * above timed out) and give the CPU a moment to settle.
	 */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
3622
/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	/* Unlike the RX path, no extra halt/settle step is applied here. */
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
3628
/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	/* Clear all CPU state bits and drop the halt bit to let it run. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3635
/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	/* Convenience wrapper: restart the RX CPU. */
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3641
/* tp->lock is held. */
/* Halt the RX or TX CPU at @cpu_base, handling chip-specific quirks.
 * Returns 0 on success, -ENODEV if the halt request timed out.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705+ chips have no separate TX CPU to halt. */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 halts its CPU through GRC_VCPU_EXT_CTRL instead. */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3679
3680 static int tg3_fw_data_len(struct tg3 *tp,
3681                            const struct tg3_firmware_hdr *fw_hdr)
3682 {
3683         int fw_len;
3684
3685         /* Non fragmented firmware have one firmware header followed by a
3686          * contiguous chunk of data to be written. The length field in that
3687          * header is not the length of data to be written but the complete
3688          * length of the bss. The data length is determined based on
3689          * tp->fw->size minus headers.
3690          *
3691          * Fragmented firmware have a main header followed by multiple
3692          * fragments. Each fragment is identical to non fragmented firmware
3693          * with a firmware header followed by a contiguous chunk of data. In
3694          * the main header, the length field is unused and set to 0xffffffff.
3695          * In each fragment header the length is the entire size of that
3696          * fragment i.e. fragment data + header length. Data length is
3697          * therefore length field in the header minus TG3_FW_HDR_LEN.
3698          */
3699         if (tp->fw_len == 0xffffffff)
3700                 fw_len = be32_to_cpu(fw_hdr->len);
3701         else
3702                 fw_len = tp->fw->size;
3703
3704         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3705 }
3706
/* tp->lock is held. */
/* Halt the CPU at @cpu_base and copy the firmware image described by
 * @fw_hdr into its scratch memory.  The caller restarts the CPU
 * afterwards.  Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory-write primitive appropriate to this chip. */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the scratch area, then keep the CPU halted while
		 * the image is loaded.
		 */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Copy each fragment's payload to its destination offset, derived
	 * from the low 16 bits of that fragment's base address.
	 */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3772
3773 /* tp->lock is held. */
3774 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3775 {
3776         int i;
3777         const int iters = 5;
3778
3779         tw32(cpu_base + CPU_STATE, 0xffffffff);
3780         tw32_f(cpu_base + CPU_PC, pc);
3781
3782         for (i = 0; i < iters; i++) {
3783                 if (tr32(cpu_base + CPU_PC) == pc)
3784                         break;
3785                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3786                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3787                 tw32_f(cpu_base + CPU_PC, pc);
3788                 udelay(1000);
3789         }
3790
3791         return (i == iters) ? -EBUSY : 0;
3792 }
3793
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas, then restart only the RX CPU at the firmware's entry
 * point.  Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	/* Release the RX CPU from halt to run the new firmware. */
	tg3_rxcpu_resume(tp);

	return 0;
}
3835
3836 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3837 {
3838         const int iters = 1000;
3839         int i;
3840         u32 val;
3841
3842         /* Wait for boot code to complete initialization and enter service
3843          * loop. It is then safe to download service patches
3844          */
3845         for (i = 0; i < iters; i++) {
3846                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3847                         break;
3848
3849                 udelay(10);
3850         }
3851
3852         if (i == iters) {
3853                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3854                 return -EBUSY;
3855         }
3856
3857         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3858         if (val & 0xff) {
3859                 netdev_warn(tp->dev,
3860                             "Other patches exist. Not downloading EEE patch\n");
3861                 return -EEXIST;
3862         }
3863
3864         return 0;
3865 }
3866
/* tp->lock is held. */
/* Download the 57766 service-patch firmware, if present and safe to do
 * so.  Best-effort: returns silently on any precondition failure.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	/* The patch download path only applies to NVRAM-less devices. */
	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	/* Sanity-check the blob's base address before using it. */
	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
3907
/* tp->lock is held. */
/* Load the TSO firmware and start its CPU at the firmware entry point.
 * No-op (returns 0) unless the chip needs firmware-based TSO.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	/* 5705 loads TSO firmware into the RX CPU, using part of the
	 * mbuf pool as scratch space; other chips use the TX CPU scratch.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	/* Release the CPU from halt to run the new firmware. */
	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3957
3958 /* tp->lock is held. */
3959 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3960 {
3961         u32 addr_high, addr_low;
3962
3963         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3964         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3965                     (mac_addr[4] <<  8) | mac_addr[5]);
3966
3967         if (index < 4) {
3968                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3969                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3970         } else {
3971                 index -= 4;
3972                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3973                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3974         }
3975 }
3976
3977 /* tp->lock is held. */
3978 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3979 {
3980         u32 addr_high;
3981         int i;
3982
3983         for (i = 0; i < 4; i++) {
3984                 if (i == 1 && skip_mac_1)
3985                         continue;
3986                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3987         }
3988
3989         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3990             tg3_asic_rev(tp) == ASIC_REV_5704) {
3991                 for (i = 4; i < 16; i++)
3992                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3993         }
3994
3995         addr_high = (tp->dev->dev_addr[0] +
3996                      tp->dev->dev_addr[1] +
3997                      tp->dev->dev_addr[2] +
3998                      tp->dev->dev_addr[3] +
3999                      tp->dev->dev_addr[4] +
4000                      tp->dev->dev_addr[5]) &
4001                 TX_BACKOFF_SEED_MASK;
4002         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4003 }
4004
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.  Restores the driver's cached MISC_HOST_CTRL value
	 * into PCI config space.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
4014
4015 static int tg3_power_up(struct tg3 *tp)
4016 {
4017         int err;
4018
4019         tg3_enable_register_access(tp);
4020
4021         err = pci_set_power_state(tp->pdev, PCI_D0);
4022         if (!err) {
4023                 /* Switch out of Vaux if it is a NIC */
4024                 tg3_pwrsrc_switch_to_vmain(tp);
4025         } else {
4026                 netdev_err(tp->dev, "Transition to D0 failed\n");
4027         }
4028
4029         return err;
4030 }
4031
4032 static int tg3_setup_phy(struct tg3 *, bool);
4033
4034 static int tg3_power_down_prepare(struct tg3 *tp)
4035 {
4036         u32 misc_host_ctrl;
4037         bool device_should_wake, do_low_power;
4038
4039         tg3_enable_register_access(tp);
4040
4041         /* Restore the CLKREQ setting. */
4042         if (tg3_flag(tp, CLKREQ_BUG))
4043                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4044                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4045
4046         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4047         tw32(TG3PCI_MISC_HOST_CTRL,
4048              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4049
4050         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4051                              tg3_flag(tp, WOL_ENABLE);
4052
4053         if (tg3_flag(tp, USE_PHYLIB)) {
4054                 do_low_power = false;
4055                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4056                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4057                         struct phy_device *phydev;
4058                         u32 phyid, advertising;
4059
4060                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4061
4062                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4063
4064                         tp->link_config.speed = phydev->speed;
4065                         tp->link_config.duplex = phydev->duplex;
4066                         tp->link_config.autoneg = phydev->autoneg;
4067                         tp->link_config.advertising = phydev->advertising;
4068
4069                         advertising = ADVERTISED_TP |
4070                                       ADVERTISED_Pause |
4071                                       ADVERTISED_Autoneg |
4072                                       ADVERTISED_10baseT_Half;
4073
4074                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4075                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4076                                         advertising |=
4077                                                 ADVERTISED_100baseT_Half |
4078                                                 ADVERTISED_100baseT_Full |
4079                                                 ADVERTISED_10baseT_Full;
4080                                 else
4081                                         advertising |= ADVERTISED_10baseT_Full;
4082                         }
4083
4084                         phydev->advertising = advertising;
4085
4086                         phy_start_aneg(phydev);
4087
4088                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4089                         if (phyid != PHY_ID_BCMAC131) {
4090                                 phyid &= PHY_BCM_OUI_MASK;
4091                                 if (phyid == PHY_BCM_OUI_1 ||
4092                                     phyid == PHY_BCM_OUI_2 ||
4093                                     phyid == PHY_BCM_OUI_3)
4094                                         do_low_power = true;
4095                         }
4096                 }
4097         } else {
4098                 do_low_power = true;
4099
4100                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4101                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4102
4103                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4104                         tg3_setup_phy(tp, false);
4105         }
4106
4107         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4108                 u32 val;
4109
4110                 val = tr32(GRC_VCPU_EXT_CTRL);
4111                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4112         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4113                 int i;
4114                 u32 val;
4115
4116                 for (i = 0; i < 200; i++) {
4117                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4118                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4119                                 break;
4120                         msleep(1);
4121                 }
4122         }
4123         if (tg3_flag(tp, WOL_CAP))
4124                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4125                                                      WOL_DRV_STATE_SHUTDOWN |
4126                                                      WOL_DRV_WOL |
4127                                                      WOL_SET_MAGIC_PKT);
4128
4129         if (device_should_wake) {
4130                 u32 mac_mode;
4131
4132                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4133                         if (do_low_power &&
4134                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4135                                 tg3_phy_auxctl_write(tp,
4136                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4137                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4138                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4139                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4140                                 udelay(40);
4141                         }
4142
4143                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4144                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4145                         else if (tp->phy_flags &
4146                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4147                                 if (tp->link_config.active_speed == SPEED_1000)
4148                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4149                                 else
4150                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4151                         } else
4152                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4153
4154                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4155                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4156                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4157                                              SPEED_100 : SPEED_10;
4158                                 if (tg3_5700_link_polarity(tp, speed))
4159                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4160                                 else
4161                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4162                         }
4163                 } else {
4164                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4165                 }
4166
4167                 if (!tg3_flag(tp, 5750_PLUS))
4168                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4169
4170                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4171                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4172                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4173                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4174
4175                 if (tg3_flag(tp, ENABLE_APE))
4176                         mac_mode |= MAC_MODE_APE_TX_EN |
4177                                     MAC_MODE_APE_RX_EN |
4178                                     MAC_MODE_TDE_ENABLE;
4179
4180                 tw32_f(MAC_MODE, mac_mode);
4181                 udelay(100);
4182
4183                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4184                 udelay(10);
4185         }
4186
4187         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4188             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4189              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4190                 u32 base_val;
4191
4192                 base_val = tp->pci_clock_ctrl;
4193                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4194                              CLOCK_CTRL_TXCLK_DISABLE);
4195
4196                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4197                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4198         } else if (tg3_flag(tp, 5780_CLASS) ||
4199                    tg3_flag(tp, CPMU_PRESENT) ||
4200                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4201                 /* do nothing */
4202         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4203                 u32 newbits1, newbits2;
4204
4205                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4206                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4207                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4208                                     CLOCK_CTRL_TXCLK_DISABLE |
4209                                     CLOCK_CTRL_ALTCLK);
4210                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4211                 } else if (tg3_flag(tp, 5705_PLUS)) {
4212                         newbits1 = CLOCK_CTRL_625_CORE;
4213                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4214                 } else {
4215                         newbits1 = CLOCK_CTRL_ALTCLK;
4216                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4217                 }
4218
4219                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4220                             40);
4221
4222                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4223                             40);
4224
4225                 if (!tg3_flag(tp, 5705_PLUS)) {
4226                         u32 newbits3;
4227
4228                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4229                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4230                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4231                                             CLOCK_CTRL_TXCLK_DISABLE |
4232                                             CLOCK_CTRL_44MHZ_CORE);
4233                         } else {
4234                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4235                         }
4236
4237                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4238                                     tp->pci_clock_ctrl | newbits3, 40);
4239                 }
4240         }
4241
4242         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4243                 tg3_power_down_phy(tp, do_low_power);
4244
4245         tg3_frob_aux_power(tp, true);
4246
4247         /* Workaround for unstable PLL clock */
4248         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4249             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4250              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4251                 u32 val = tr32(0x7d00);
4252
4253                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4254                 tw32(0x7d00, val);
4255                 if (!tg3_flag(tp, ENABLE_ASF)) {
4256                         int err;
4257
4258                         err = tg3_nvram_lock(tp);
4259                         tg3_halt_cpu(tp, RX_CPU_BASE);
4260                         if (!err)
4261                                 tg3_nvram_unlock(tp);
4262                 }
4263         }
4264
4265         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4266
4267         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4268
4269         return 0;
4270 }
4271
4272 static void tg3_power_down(struct tg3 *tp)
4273 {
4274         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4275         pci_set_power_state(tp->pdev, PCI_D3hot);
4276 }
4277
4278 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4279 {
4280         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4281         case MII_TG3_AUX_STAT_10HALF:
4282                 *speed = SPEED_10;
4283                 *duplex = DUPLEX_HALF;
4284                 break;
4285
4286         case MII_TG3_AUX_STAT_10FULL:
4287                 *speed = SPEED_10;
4288                 *duplex = DUPLEX_FULL;
4289                 break;
4290
4291         case MII_TG3_AUX_STAT_100HALF:
4292                 *speed = SPEED_100;
4293                 *duplex = DUPLEX_HALF;
4294                 break;
4295
4296         case MII_TG3_AUX_STAT_100FULL:
4297                 *speed = SPEED_100;
4298                 *duplex = DUPLEX_FULL;
4299                 break;
4300
4301         case MII_TG3_AUX_STAT_1000HALF:
4302                 *speed = SPEED_1000;
4303                 *duplex = DUPLEX_HALF;
4304                 break;
4305
4306         case MII_TG3_AUX_STAT_1000FULL:
4307                 *speed = SPEED_1000;
4308                 *duplex = DUPLEX_FULL;
4309                 break;
4310
4311         default:
4312                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4313                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4314                                  SPEED_10;
4315                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4316                                   DUPLEX_HALF;
4317                         break;
4318                 }
4319                 *speed = SPEED_UNKNOWN;
4320                 *duplex = DUPLEX_UNKNOWN;
4321                 break;
4322         }
4323 }
4324
/* Program the PHY's autonegotiation advertisement from the requested
 * ethtool advertise mask and flow-control bits, then (for EEE-capable
 * PHYs) update the EEE advertisement and the per-ASIC DSP settings.
 *
 * Returns 0 on success or the first PHY access error encountered.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 also advertise forced master mode. */
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Clear the LPI-enable bit while the EEE advertisement changes. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		/* When EEE is disabled, advertise nothing and record that. */
		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Re-lock SMDSP access; report the first error seen. */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4408
/* Begin link establishment on a copper PHY: either (re)start
 * autonegotiation with the configured (or low-power/WOL) advertisement,
 * or force the configured speed/duplex when autoneg is disabled.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Low-power mode: advertise 10Mb (and optionally
			 * 100Mb for WOL, 1G when the PHY allows 1G on
			 * auxiliary power).
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Translate the forced speed/duplex into BMCR bits. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Take the link down (loopback) and wait for BMSR to
			 * report link loss before applying the new BMCR.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link status is latched; read twice. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4505
/* Reconstruct tp->link_config (autoneg mode, forced speed/duplex,
 * advertising mask, flow control) from the PHY's current registers.
 *
 * Returns 0 on success, -EIO when the forced-mode register contents
 * do not map to a supported configuration, or a PHY read error.
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Autoneg is off: decode the forced speed/duplex. */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			/* Forced 10Mb is not valid on serdes links. */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		/* Copper: pull 10/100 advertisement and pause bits. */
		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper gigabit advertisement lives in CTRL1000. */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* Serdes: 1000X advertisement shares MII_ADVERTISE. */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4602
4603 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4604 {
4605         int err;
4606
4607         /* Turn off tap power management. */
4608         /* Set Extended packet length bit */
4609         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4610
4611         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4612         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4613         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4614         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4615         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4616
4617         udelay(40);
4618
4619         return err;
4620 }
4621
4622 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4623 {
4624         struct ethtool_eee eee;
4625
4626         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4627                 return true;
4628
4629         tg3_eee_pull_config(tp, &eee);
4630
4631         if (tp->eee.eee_enabled) {
4632                 if (tp->eee.advertised != eee.advertised ||
4633                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4634                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4635                         return false;
4636         } else {
4637                 /* EEE is disabled but we're advertising */
4638                 if (eee.advertised)
4639                         return false;
4640         }
4641
4642         return true;
4643 }
4644
4645 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4646 {
4647         u32 advmsk, tgtadv, advertising;
4648
4649         advertising = tp->link_config.advertising;
4650         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4651
4652         advmsk = ADVERTISE_ALL;
4653         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4654                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4655                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4656         }
4657
4658         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4659                 return false;
4660
4661         if ((*lcladv & advmsk) != tgtadv)
4662                 return false;
4663
4664         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4665                 u32 tg3_ctrl;
4666
4667                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4668
4669                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4670                         return false;
4671
4672                 if (tgtadv &&
4673                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4674                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4675                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4676                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4677                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4678                 } else {
4679                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4680                 }
4681
4682                 if (tg3_ctrl != tgtadv)
4683                         return false;
4684         }
4685
4686         return true;
4687 }
4688
4689 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4690 {
4691         u32 lpeth = 0;
4692
4693         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4694                 u32 val;
4695
4696                 if (tg3_readphy(tp, MII_STAT1000, &val))
4697                         return false;
4698
4699                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4700         }
4701
4702         if (tg3_readphy(tp, MII_LPA, rmtadv))
4703                 return false;
4704
4705         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4706         tp->link_config.rmt_adv = lpeth;
4707
4708         return true;
4709 }
4710
4711 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4712 {
4713         if (curr_link_up != tp->link_up) {
4714                 if (curr_link_up) {
4715                         netif_carrier_on(tp->dev);
4716                 } else {
4717                         netif_carrier_off(tp->dev);
4718                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4719                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4720                 }
4721
4722                 tg3_link_report(tp);
4723                 return true;
4724         }
4725
4726         return false;
4727 }
4728
4729 static void tg3_clear_mac_status(struct tg3 *tp)
4730 {
4731         tw32(MAC_EVENT, 0);
4732
4733         tw32_f(MAC_STATUS,
4734                MAC_STATUS_SYNC_CHANGED |
4735                MAC_STATUS_CFG_CHANGED |
4736                MAC_STATUS_MI_COMPLETION |
4737                MAC_STATUS_LNKSTATE_CHANGED);
4738         udelay(40);
4739 }
4740
/* Program the CPMU EEE registers from tp->eee: link-idle detection
 * control, entry/exit timing, debounce timers, and the EEE mode word
 * (written as zero when EEE is disabled).
 */
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	/* 57765 A0 additionally gates on APE TX idle. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	/* Assemble the EEE mode word; LPI-in-TX only when tx_lpi is on. */
	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	/* Write zero to keep EEE fully off when it is not enabled. */
	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
4776
4777 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4778 {
4779         bool current_link_up;
4780         u32 bmsr, val;
4781         u32 lcl_adv, rmt_adv;
4782         u16 current_speed;
4783         u8 current_duplex;
4784         int i, err;
4785
4786         tg3_clear_mac_status(tp);
4787
4788         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4789                 tw32_f(MAC_MI_MODE,
4790                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4791                 udelay(80);
4792         }
4793
4794         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4795
4796         /* Some third-party PHYs need to be reset on link going
4797          * down.
4798          */
4799         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4800              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4801              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4802             tp->link_up) {
4803                 tg3_readphy(tp, MII_BMSR, &bmsr);
4804                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4805                     !(bmsr & BMSR_LSTATUS))
4806                         force_reset = true;
4807         }
4808         if (force_reset)
4809                 tg3_phy_reset(tp);
4810
4811         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4812                 tg3_readphy(tp, MII_BMSR, &bmsr);
4813                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4814                     !tg3_flag(tp, INIT_COMPLETE))
4815                         bmsr = 0;
4816
4817                 if (!(bmsr & BMSR_LSTATUS)) {
4818                         err = tg3_init_5401phy_dsp(tp);
4819                         if (err)
4820                                 return err;
4821
4822                         tg3_readphy(tp, MII_BMSR, &bmsr);
4823                         for (i = 0; i < 1000; i++) {
4824                                 udelay(10);
4825                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4826                                     (bmsr & BMSR_LSTATUS)) {
4827                                         udelay(40);
4828                                         break;
4829                                 }
4830                         }
4831
4832                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4833                             TG3_PHY_REV_BCM5401_B0 &&
4834                             !(bmsr & BMSR_LSTATUS) &&
4835                             tp->link_config.active_speed == SPEED_1000) {
4836                                 err = tg3_phy_reset(tp);
4837                                 if (!err)
4838                                         err = tg3_init_5401phy_dsp(tp);
4839                                 if (err)
4840                                         return err;
4841                         }
4842                 }
4843         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4844                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4845                 /* 5701 {A0,B0} CRC bug workaround */
4846                 tg3_writephy(tp, 0x15, 0x0a75);
4847                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4848                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4849                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4850         }
4851
4852         /* Clear pending interrupts... */
4853         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4854         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4855
4856         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4857                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4858         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4859                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4860
4861         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4862             tg3_asic_rev(tp) == ASIC_REV_5701) {
4863                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4864                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4865                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4866                 else
4867                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4868         }
4869
4870         current_link_up = false;
4871         current_speed = SPEED_UNKNOWN;
4872         current_duplex = DUPLEX_UNKNOWN;
4873         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4874         tp->link_config.rmt_adv = 0;
4875
4876         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4877                 err = tg3_phy_auxctl_read(tp,
4878                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4879                                           &val);
4880                 if (!err && !(val & (1 << 10))) {
4881                         tg3_phy_auxctl_write(tp,
4882                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4883                                              val | (1 << 10));
4884                         goto relink;
4885                 }
4886         }
4887
4888         bmsr = 0;
4889         for (i = 0; i < 100; i++) {
4890                 tg3_readphy(tp, MII_BMSR, &bmsr);
4891                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4892                     (bmsr & BMSR_LSTATUS))
4893                         break;
4894                 udelay(40);
4895         }
4896
4897         if (bmsr & BMSR_LSTATUS) {
4898                 u32 aux_stat, bmcr;
4899
4900                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4901                 for (i = 0; i < 2000; i++) {
4902                         udelay(10);
4903                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4904                             aux_stat)
4905                                 break;
4906                 }
4907
4908                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4909                                              &current_speed,
4910                                              &current_duplex);
4911
4912                 bmcr = 0;
4913                 for (i = 0; i < 200; i++) {
4914                         tg3_readphy(tp, MII_BMCR, &bmcr);
4915                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4916                                 continue;
4917                         if (bmcr && bmcr != 0x7fff)
4918                                 break;
4919                         udelay(10);
4920                 }
4921
4922                 lcl_adv = 0;
4923                 rmt_adv = 0;
4924
4925                 tp->link_config.active_speed = current_speed;
4926                 tp->link_config.active_duplex = current_duplex;
4927
4928                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4929                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4930
4931                         if ((bmcr & BMCR_ANENABLE) &&
4932                             eee_config_ok &&
4933                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4934                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4935                                 current_link_up = true;
4936
4937                         /* EEE settings changes take effect only after a phy
4938                          * reset.  If we have skipped a reset due to Link Flap
4939                          * Avoidance being enabled, do it now.
4940                          */
4941                         if (!eee_config_ok &&
4942                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4943                             !force_reset) {
4944                                 tg3_setup_eee(tp);
4945                                 tg3_phy_reset(tp);
4946                         }
4947                 } else {
4948                         if (!(bmcr & BMCR_ANENABLE) &&
4949                             tp->link_config.speed == current_speed &&
4950                             tp->link_config.duplex == current_duplex) {
4951                                 current_link_up = true;
4952                         }
4953                 }
4954
4955                 if (current_link_up &&
4956                     tp->link_config.active_duplex == DUPLEX_FULL) {
4957                         u32 reg, bit;
4958
4959                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4960                                 reg = MII_TG3_FET_GEN_STAT;
4961                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4962                         } else {
4963                                 reg = MII_TG3_EXT_STAT;
4964                                 bit = MII_TG3_EXT_STAT_MDIX;
4965                         }
4966
4967                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4968                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4969
4970                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4971                 }
4972         }
4973
4974 relink:
4975         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4976                 tg3_phy_copper_begin(tp);
4977
4978                 if (tg3_flag(tp, ROBOSWITCH)) {
4979                         current_link_up = true;
4980                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4981                         current_speed = SPEED_1000;
4982                         current_duplex = DUPLEX_FULL;
4983                         tp->link_config.active_speed = current_speed;
4984                         tp->link_config.active_duplex = current_duplex;
4985                 }
4986
4987                 tg3_readphy(tp, MII_BMSR, &bmsr);
4988                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4989                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4990                         current_link_up = true;
4991         }
4992
4993         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4994         if (current_link_up) {
4995                 if (tp->link_config.active_speed == SPEED_100 ||
4996                     tp->link_config.active_speed == SPEED_10)
4997                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4998                 else
4999                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5000         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5001                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5002         else
5003                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5004
5005         /* In order for the 5750 core in BCM4785 chip to work properly
5006          * in RGMII mode, the Led Control Register must be set up.
5007          */
5008         if (tg3_flag(tp, RGMII_MODE)) {
5009                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5010                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5011
5012                 if (tp->link_config.active_speed == SPEED_10)
5013                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5014                 else if (tp->link_config.active_speed == SPEED_100)
5015                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016                                      LED_CTRL_100MBPS_ON);
5017                 else if (tp->link_config.active_speed == SPEED_1000)
5018                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5019                                      LED_CTRL_1000MBPS_ON);
5020
5021                 tw32(MAC_LED_CTRL, led_ctrl);
5022                 udelay(40);
5023         }
5024
5025         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5026         if (tp->link_config.active_duplex == DUPLEX_HALF)
5027                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5028
5029         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5030                 if (current_link_up &&
5031                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5032                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5033                 else
5034                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5035         }
5036
5037         /* ??? Without this setting Netgear GA302T PHY does not
5038          * ??? send/receive packets...
5039          */
5040         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5041             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5042                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5043                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5044                 udelay(80);
5045         }
5046
5047         tw32_f(MAC_MODE, tp->mac_mode);
5048         udelay(40);
5049
5050         tg3_phy_eee_adjust(tp, current_link_up);
5051
5052         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5053                 /* Polled via timer. */
5054                 tw32_f(MAC_EVENT, 0);
5055         } else {
5056                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5057         }
5058         udelay(40);
5059
5060         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5061             current_link_up &&
5062             tp->link_config.active_speed == SPEED_1000 &&
5063             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5064                 udelay(120);
5065                 tw32_f(MAC_STATUS,
5066                      (MAC_STATUS_SYNC_CHANGED |
5067                       MAC_STATUS_CFG_CHANGED));
5068                 udelay(40);
5069                 tg3_write_mem(tp,
5070                               NIC_SRAM_FIRMWARE_MBOX,
5071                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5072         }
5073
5074         /* Prevent send BD corruption. */
5075         if (tg3_flag(tp, CLKREQ_BUG)) {
5076                 if (tp->link_config.active_speed == SPEED_100 ||
5077                     tp->link_config.active_speed == SPEED_10)
5078                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5079                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5080                 else
5081                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5082                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5083         }
5084
5085         tg3_test_and_report_link_chg(tp, current_link_up);
5086
5087         return 0;
5088 }
5089
/* Bookkeeping for the software 1000BASE-X (fiber) autonegotiation state
 * machine implemented in tg3_fiber_aneg_smachine() and driven from
 * fiber_autoneg().  Models an IEEE 802.3 Clause 37 style arbitration
 * process in software when the hardware SG_DIG autoneg path is not used.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14	/* unimplemented */
#define ANEG_STATE_NEXT_PAGE_WAIT       15	/* unimplemented */

	/* MR_* management/result flags.  MR_AN_ENABLE/MR_RESTART_AN are
	 * inputs; MR_AN_COMPLETE, MR_LINK_OK and the MR_LP_ADV_* link
	 * partner ability bits are outputs decoded from the received
	 * config word.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks (one tick per invocation). */
	unsigned long link_time, cur_time;

	/* Last config word sampled from MAC_RX_AUTO_NEG, and how many
	 * consecutive times the same word has been seen.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match predicates derived above: stable ability word seen,
	 * idle (no config received), ACK bit observed.
	 */
	char ability_match, idle_match, ack_match;

	/* Config word we transmit / last word received.  ANEG_CFG_* are
	 * the bit meanings within these words.  NOTE(review): PS1/PS2
	 * values look swapped relative to their numeric order; this
	 * matches the MAC's TX/RX autoneg register layout — do not
	 * "fix" without hardware documentation.
	 */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks to wait for the link/partner to settle between phases. */
#define ANEG_STATE_SETTLE_TIME  10000
5153
/* Advance the software fiber autonegotiation state machine by one tick.
 *
 * Called repeatedly (roughly once per microsecond) from fiber_autoneg().
 * Each call samples the received config word from the MAC, updates the
 * match predicates in @ap, then executes one transition of the
 * Clause-37-style arbitration machine.
 *
 * Returns ANEG_OK to keep polling, ANEG_TIMER_ENAB while waiting out a
 * settle interval, ANEG_DONE when negotiation finished, or ANEG_FAILED
 * on an invalid/failed negotiation.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: clear all history before running. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word, if any, and track whether the
	 * same non-idle word has been seen more than once (ability_match)
	 * and whether its ACK bit is set (ack_match).
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* Word changed: restart the stability count. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: link is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Start transmitting an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold the zero config for a settle time before
		 * advertising our abilities.
		 */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-zero ability word from the partner. */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's ability word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* ACK seen; proceed only if the acked word matches
			 * the stable ability word, else renegotiate.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's ability bits into MR_LP_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		/* Partner went back to advertising idle: renegotiate. */
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * fail if either side wants one.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop transmitting config words; wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
5405
5406 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5407 {
5408         int res = 0;
5409         struct tg3_fiber_aneginfo aninfo;
5410         int status = ANEG_FAILED;
5411         unsigned int tick;
5412         u32 tmp;
5413
5414         tw32_f(MAC_TX_AUTO_NEG, 0);
5415
5416         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5417         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5418         udelay(40);
5419
5420         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5421         udelay(40);
5422
5423         memset(&aninfo, 0, sizeof(aninfo));
5424         aninfo.flags |= MR_AN_ENABLE;
5425         aninfo.state = ANEG_STATE_UNKNOWN;
5426         aninfo.cur_time = 0;
5427         tick = 0;
5428         while (++tick < 195000) {
5429                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5430                 if (status == ANEG_DONE || status == ANEG_FAILED)
5431                         break;
5432
5433                 udelay(1);
5434         }
5435
5436         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5437         tw32_f(MAC_MODE, tp->mac_mode);
5438         udelay(40);
5439
5440         *txflags = aninfo.txconfig;
5441         *rxflags = aninfo.flags;
5442
5443         if (status == ANEG_DONE &&
5444             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5445                              MR_LP_ADV_FULL_DUPLEX)))
5446                 res = 1;
5447
5448         return res;
5449 }
5450
/* Initialize the external BCM8002 SerDes PHY.
 *
 * The register numbers and values written below are opaque BCM8002
 * vendor-init magic; the sequence and delays are load-bearing — do not
 * reorder.  Uses busy-wait delay loops because this can run early,
 * before sleeping is convenient (see the XXX schedule_timeout notes).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5500
/* Hardware-assisted (SG_DIG) fiber autonegotiation path.
 *
 * Programs SG_DIG_CTRL according to the requested link configuration,
 * services the autoneg result, and falls back to parallel detection
 * when we have PCS sync but receive no config words.  @mac_status is
 * the caller's MAC_STATUS snapshot (re-read internally where needed).
 *
 * Returns true if the link should be considered up.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	/* All chip revs except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes below; the value differs per MAC port.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable hardware autoneg if it was on,
		 * and report link up purely on PCS sync.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If the link came up via parallel detect and we still
		 * have PCS sync without config words, keep it up while
		 * the serdes counter runs down instead of restarting.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while writing the wanted control word. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Reconstruct local advertisement from what we
			 * programmed; decode partner pause bits from
			 * SG_DIG_STATUS.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			/* Autoneg not complete: count down, then try
			 * parallel detection before restarting.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5645
/* Resolve fiber (TBI) link state without the MAC's hardware autoneg
 * engine.  Called from tg3_setup_fiber_phy() when HW_AUTONEG is not
 * set; @mac_status is a fresh MAC_STATUS snapshot.  Returns true when
 * the link should be considered up.
 */
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	/* No PCS sync means no usable signal at all -- link stays down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		/* Run the software 1000BASE-X autoneg state machine. */
		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the ANEG config-word pause bits into
			 * MII-style advertisement bits so the common flow
			 * control resolution code can be reused.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		/* Ack the latched sync/config-changed bits until they stay
		 * clear (up to 30 iterations of ~60us each).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		/* Autoneg did not complete, but we have sync and the link
		 * partner is not sending config words: accept the link as
		 * up anyway (parallel-detection-style fallback).
		 */
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		/* Forced mode: no pause frames negotiated. */
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		/* Pulse SEND_CONFIGS, then return to normal mode. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5710
/* Configure and poll the TBI (fiber) PHY and update the link state.
 *
 * Dispatches autoneg either to the hardware engine or to the software
 * state machine depending on the HW_AUTONEG flag, then reports link
 * changes.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	/* Snapshot current settings so only real changes are reported. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: without hardware autoneg, if the link is already up
	 * and stable (synced, signal detect, no latched config changes),
	 * just ack the change bits and leave the configuration alone.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	/* Stop transmitting autoneg config words while reconfiguring. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the shared status block so
	 * it is not re-processed as a new event.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched status-change bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Briefly pulse SEND_CONFIGS -- presumably to nudge
			 * the partner into restarting autoneg.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		/* TBI links only run at 1000/full. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Even when tg3_test_and_report_link_chg() reports no up/down
	 * transition (returns false -- confirm against its definition),
	 * still log when pause/speed/duplex moved.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5813
5814 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5815 {
5816         int err = 0;
5817         u32 bmsr, bmcr;
5818         u16 current_speed = SPEED_UNKNOWN;
5819         u8 current_duplex = DUPLEX_UNKNOWN;
5820         bool current_link_up = false;
5821         u32 local_adv, remote_adv, sgsr;
5822
5823         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5824              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5825              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5826              (sgsr & SERDES_TG3_SGMII_MODE)) {
5827
5828                 if (force_reset)
5829                         tg3_phy_reset(tp);
5830
5831                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5832
5833                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5834                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835                 } else {
5836                         current_link_up = true;
5837                         if (sgsr & SERDES_TG3_SPEED_1000) {
5838                                 current_speed = SPEED_1000;
5839                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5840                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5841                                 current_speed = SPEED_100;
5842                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5843                         } else {
5844                                 current_speed = SPEED_10;
5845                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5846                         }
5847
5848                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5849                                 current_duplex = DUPLEX_FULL;
5850                         else
5851                                 current_duplex = DUPLEX_HALF;
5852                 }
5853
5854                 tw32_f(MAC_MODE, tp->mac_mode);
5855                 udelay(40);
5856
5857                 tg3_clear_mac_status(tp);
5858
5859                 goto fiber_setup_done;
5860         }
5861
5862         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5863         tw32_f(MAC_MODE, tp->mac_mode);
5864         udelay(40);
5865
5866         tg3_clear_mac_status(tp);
5867
5868         if (force_reset)
5869                 tg3_phy_reset(tp);
5870
5871         tp->link_config.rmt_adv = 0;
5872
5873         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5874         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5875         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5876                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5877                         bmsr |= BMSR_LSTATUS;
5878                 else
5879                         bmsr &= ~BMSR_LSTATUS;
5880         }
5881
5882         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5883
5884         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5885             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5886                 /* do nothing, just check for link up at the end */
5887         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5888                 u32 adv, newadv;
5889
5890                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5891                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5892                                  ADVERTISE_1000XPAUSE |
5893                                  ADVERTISE_1000XPSE_ASYM |
5894                                  ADVERTISE_SLCT);
5895
5896                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5897                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5898
5899                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5900                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5901                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5902                         tg3_writephy(tp, MII_BMCR, bmcr);
5903
5904                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5905                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5906                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5907
5908                         return err;
5909                 }
5910         } else {
5911                 u32 new_bmcr;
5912
5913                 bmcr &= ~BMCR_SPEED1000;
5914                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5915
5916                 if (tp->link_config.duplex == DUPLEX_FULL)
5917                         new_bmcr |= BMCR_FULLDPLX;
5918
5919                 if (new_bmcr != bmcr) {
5920                         /* BMCR_SPEED1000 is a reserved bit that needs
5921                          * to be set on write.
5922                          */
5923                         new_bmcr |= BMCR_SPEED1000;
5924
5925                         /* Force a linkdown */
5926                         if (tp->link_up) {
5927                                 u32 adv;
5928
5929                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5930                                 adv &= ~(ADVERTISE_1000XFULL |
5931                                          ADVERTISE_1000XHALF |
5932                                          ADVERTISE_SLCT);
5933                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5934                                 tg3_writephy(tp, MII_BMCR, bmcr |
5935                                                            BMCR_ANRESTART |
5936                                                            BMCR_ANENABLE);
5937                                 udelay(10);
5938                                 tg3_carrier_off(tp);
5939                         }
5940                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5941                         bmcr = new_bmcr;
5942                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5943                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5944                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5945                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5946                                         bmsr |= BMSR_LSTATUS;
5947                                 else
5948                                         bmsr &= ~BMSR_LSTATUS;
5949                         }
5950                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5951                 }
5952         }
5953
5954         if (bmsr & BMSR_LSTATUS) {
5955                 current_speed = SPEED_1000;
5956                 current_link_up = true;
5957                 if (bmcr & BMCR_FULLDPLX)
5958                         current_duplex = DUPLEX_FULL;
5959                 else
5960                         current_duplex = DUPLEX_HALF;
5961
5962                 local_adv = 0;
5963                 remote_adv = 0;
5964
5965                 if (bmcr & BMCR_ANENABLE) {
5966                         u32 common;
5967
5968                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5969                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5970                         common = local_adv & remote_adv;
5971                         if (common & (ADVERTISE_1000XHALF |
5972                                       ADVERTISE_1000XFULL)) {
5973                                 if (common & ADVERTISE_1000XFULL)
5974                                         current_duplex = DUPLEX_FULL;
5975                                 else
5976                                         current_duplex = DUPLEX_HALF;
5977
5978                                 tp->link_config.rmt_adv =
5979                                            mii_adv_to_ethtool_adv_x(remote_adv);
5980                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5981                                 /* Link is up via parallel detect */
5982                         } else {
5983                                 current_link_up = false;
5984                         }
5985                 }
5986         }
5987
5988 fiber_setup_done:
5989         if (current_link_up && current_duplex == DUPLEX_FULL)
5990                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5991
5992         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5993         if (tp->link_config.active_duplex == DUPLEX_HALF)
5994                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5995
5996         tw32_f(MAC_MODE, tp->mac_mode);
5997         udelay(40);
5998
5999         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6000
6001         tp->link_config.active_speed = current_speed;
6002         tp->link_config.active_duplex = current_duplex;
6003
6004         tg3_test_and_report_link_chg(tp, current_link_up);
6005         return err;
6006 }
6007
/* Periodic worker implementing 1000BASE-X parallel detection.
 *
 * While tp->serdes_counter is nonzero, autoneg is still being given
 * time and this only counts down.  Afterwards: if autoneg failed but
 * we have signal detect and the partner is not sending config words,
 * force the link up (parallel detect); conversely, if a
 * parallel-detected link starts receiving config words, re-enable
 * autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice -- presumably the first read returns
			 * latched state; confirm against the PHY datasheet.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
6067
/* Top-level link setup: dispatch to the fiber, fiber-MII or copper
 * PHY handler based on tp->phy_flags, then reprogram speed/duplex
 * dependent MAC settings (5784_AX clock prescaler, TX slot time/IPG,
 * statistics coalescing, ASPM workaround threshold).  Returns the
 * PHY handler's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		/* Match the GRC prescaler to the current MAC clock rate. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720/5762 keep extra fields in MAC_TX_LENGTHS; preserve them. */
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Use the larger 0xff slot time for half-duplex gigabit. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Use the relaxed L1 entry threshold only while the link
		 * is down; saturate it when the link is up.
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
6133
6134 /* tp->lock must be held */
6135 static u64 tg3_refclk_read(struct tg3 *tp)
6136 {
6137         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6138         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6139 }
6140
/* tp->lock must be held.
 *
 * Load a new value into the EAV reference clock.  The counter is
 * stopped around the update so the two 32-bit halves are programmed
 * consistently, then resumed (with a flushing write).
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
6151
6152 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6153 static inline void tg3_full_unlock(struct tg3 *tp);
6154 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6155 {
6156         struct tg3 *tp = netdev_priv(dev);
6157
6158         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6159                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6160                                 SOF_TIMESTAMPING_SOFTWARE;
6161
6162         if (tg3_flag(tp, PTP_CAPABLE)) {
6163                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6164                                         SOF_TIMESTAMPING_RX_HARDWARE |
6165                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6166         }
6167
6168         if (tp->ptp_clock)
6169                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6170         else
6171                 info->phc_index = -1;
6172
6173         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6174
6175         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6176                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6177                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6178                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6179         return 0;
6180 }
6181
/* PTP callback: adjust the hardware clock frequency by @ppb parts per
 * billion.  Negative values slow the clock down via the hardware's
 * NEG correction bit.
 */
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	/* Work with the magnitude; the sign becomes the NEG bit below. */
	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	/* A zero correction disables the correction logic entirely. */
	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
6217
/* PTP callback: shift the clock by @delta ns.  The offset is kept in
 * software (tp->ptp_adjust) and applied when timestamps are read,
 * rather than rewriting the hardware counter.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
6228
6229 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6230 {
6231         u64 ns;
6232         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6233
6234         tg3_full_lock(tp, 0);
6235         ns = tg3_refclk_read(tp);
6236         ns += tp->ptp_adjust;
6237         tg3_full_unlock(tp);
6238
6239         *ts = ns_to_timespec64(ns);
6240
6241         return 0;
6242 }
6243
6244 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6245                            const struct timespec64 *ts)
6246 {
6247         u64 ns;
6248         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6249
6250         ns = timespec64_to_ns(ts);
6251
6252         tg3_full_lock(tp, 0);
6253         tg3_refclk_write(tp, ns);
6254         tp->ptp_adjust = 0;
6255         tg3_full_unlock(tp);
6256
6257         return 0;
6258 }
6259
/* PTP callback: enable/disable ancillary clock features.  Only a
 * single one-shot periodic output (PTP_CLK_REQ_PEROUT, index 0) is
 * supported, implemented with the EAV watchdog 0 registers; all other
 * requests return -EOPNOTSUPP.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		/* Only output channel 0 exists. */
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			/* One-shot only: a nonzero period is rejected. */
			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			/* The start time must fit in 63 bits (the MSB
			 * register carries the enable bit).
			 */
			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			/* Arm watchdog 0 and route it to the TSYNC pin. */
			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			/* Disarm the watchdog and restore the clock ctl. */
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
6318
/* Capability/callback table handed to the PTP core for this device:
 * one one-shot periodic output, no alarms, external timestamp
 * channels, pins or PPS (see tg3_ptp_enable() for the perout
 * implementation).
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime64	= tg3_ptp_gettime,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
6334
6335 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6336                                      struct skb_shared_hwtstamps *timestamp)
6337 {
6338         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6339         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6340                                            tp->ptp_adjust);
6341 }
6342
/* tp->lock must be held.
 *
 * Prepare PTP support on a capable chip: seed the hardware clock from
 * current system (wall-clock) time, clear the software offset and
 * install the callback table.  Registration of the PTP clock device
 * itself happens elsewhere (see tg3_ptp_fini()'s unregister).
 */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}
6354
/* tp->lock must be held.
 *
 * Re-seed the hardware clock (e.g. after it has lost state across a
 * reset/resume) with current system time plus the outstanding
 * software offset, then clear that offset.
 */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}
6364
/* Tear down PTP state: unregister the clock device (if one was ever
 * registered) and reset the software offset.
 */
static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
6374
/* Return the irq_sync flag -- nonzero while interrupts are being
 * quiesced.  NOTE(review): semantics inferred from the field name;
 * confirm against the code that sets tp->irq_sync.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
6379
6380 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6381 {
6382         int i;
6383
6384         dst = (u32 *)((u8 *)dst + off);
6385         for (i = 0; i < len; i += sizeof(u32))
6386                 *dst++ = tr32(off + i);
6387 }
6388
/* Fill @regs with a register dump for non-PCIe (legacy) devices by
 * reading each functional block's register window into the matching
 * offsets of the buffer (see tg3_rd32_loop()).
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* TX CPU registers are only dumped on pre-5705 devices. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	/* NVRAM interface registers only when NVRAM is present. */
	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6438
/* Log a snapshot of device registers plus each vector's hardware status
 * block and NAPI bookkeeping.  Called from error paths, so the scratch
 * buffer is allocated with GFP_ATOMIC; if that fails we silently skip
 * the dump rather than make a bad situation worse.
 */
6439 static void tg3_dump_state(struct tg3 *tp)
6440 {
6441         int i;
6442         u32 *regs;
6443
6444         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6445         if (!regs)
6446                 return;
6447
6448         if (tg3_flag(tp, PCI_EXPRESS)) {
6449                 /* Read up to but not including private PCI registers */
6450                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6451                         regs[i / sizeof(u32)] = tr32(i);
6452         } else
6453                 tg3_dump_legacy_regs(tp, regs);
6454
             /* Print four registers per line, skipping all-zero groups. */
6455         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6456                 if (!regs[i + 0] && !regs[i + 1] &&
6457                     !regs[i + 2] && !regs[i + 3])
6458                         continue;
6459
6460                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6461                            i * 4,
6462                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6463         }
6464
6465         kfree(regs);
6466
6467         for (i = 0; i < tp->irq_cnt; i++) {
6468                 struct tg3_napi *tnapi = &tp->napi[i];
6469
6470                 /* SW status block */
6471                 netdev_err(tp->dev,
6472                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6473                            i,
6474                            tnapi->hw_status->status,
6475                            tnapi->hw_status->status_tag,
6476                            tnapi->hw_status->rx_jumbo_consumer,
6477                            tnapi->hw_status->rx_consumer,
6478                            tnapi->hw_status->rx_mini_consumer,
6479                            tnapi->hw_status->idx[0].rx_producer,
6480                            tnapi->hw_status->idx[0].tx_consumer);
6481
                     /* Driver-side view of the same vector, for comparison. */
6482                 netdev_err(tp->dev,
6483                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6484                            i,
6485                            tnapi->last_tag, tnapi->last_irq_tag,
6486                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6487                            tnapi->rx_rcb_ptr,
6488                            tnapi->prodring.rx_std_prod_idx,
6489                            tnapi->prodring.rx_std_cons_idx,
6490                            tnapi->prodring.rx_jmb_prod_idx,
6491                            tnapi->prodring.rx_jmb_cons_idx);
6492         }
6493 }
6494
6495 /* This is called whenever we suspect that the system chipset is re-
6496  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6497  * is bogus tx completions. We try to recover by setting the
6498  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6499  * in the workqueue.
6500  */
6501 static void tg3_tx_recover(struct tg3 *tp)
6502 {
             /* If the reorder workaround is already in effect (flag set or
              * indirect mailbox writes in use), this state is unexplainable
              * and indicates a driver bug -- crash rather than limp along.
              */
6503         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6504                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6505
6506         netdev_warn(tp->dev,
6507                     "The system may be re-ordering memory-mapped I/O "
6508                     "cycles to the network device, attempting to recover. "
6509                     "Please report the problem to the driver maintainer "
6510                     "and include system chipset information.\n");
6511
             /* The actual chip reset happens later, from the workqueue. */
6512         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6513 }
6514
6515 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6516 {
6517         /* Tell compiler to fetch tx indices from memory. */
6518         barrier();
6519         return tnapi->tx_pending -
6520                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6521 }
6522
6523 /* Tigon3 never reports partial packet sends.  So we do not
6524  * need special logic to handle SKBs that have not had all
6525  * of their frags sent yet, like SunGEM does.
6526  */
     /* Reclaim completed tx descriptors for one vector: unmap the DMA
      * buffers, deliver tx timestamps, free the skbs, and wake the
      * queue if it was stopped and enough space has opened up.
      */
6527 static void tg3_tx(struct tg3_napi *tnapi)
6528 {
6529         struct tg3 *tp = tnapi->tp;
             /* hw_idx: last descriptor the chip has consumed;
              * sw_idx: last descriptor we have reclaimed.
              */
6530         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6531         u32 sw_idx = tnapi->tx_cons;
6532         struct netdev_queue *txq;
6533         int index = tnapi - tp->napi;
6534         unsigned int pkts_compl = 0, bytes_compl = 0;
6535
             /* With TSS the vector-to-tx-queue mapping is shifted down by
              * one (the first vector carries no tx queue).
              */
6536         if (tg3_flag(tp, ENABLE_TSS))
6537                 index--;
6538
6539         txq = netdev_get_tx_queue(tp->dev, index);
6540
6541         while (sw_idx != hw_idx) {
6542                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6543                 struct sk_buff *skb = ri->skb;
6544                 int i, tx_bug = 0;
6545
                     /* A completion slot with no skb means hardware and
                      * driver state disagree -- schedule a recovery reset.
                      */
6546                 if (unlikely(skb == NULL)) {
6547                         tg3_tx_recover(tp);
6548                         return;
6549                 }
6550
                     /* Deliver the hardware tx timestamp, if one was taken
                      * for this packet.
                      */
6551                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6552                         struct skb_shared_hwtstamps timestamp;
6553                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6554                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6555
6556                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6557
6558                         skb_tstamp_tx(skb, &timestamp);
6559                 }
6560
6561                 pci_unmap_single(tp->pdev,
6562                                  dma_unmap_addr(ri, mapping),
6563                                  skb_headlen(skb),
6564                                  PCI_DMA_TODEVICE);
6565
6566                 ri->skb = NULL;
6567
                     /* Skip any extra descriptors used when this mapping was
                      * split; they belong to the same buffer.
                      */
6568                 while (ri->fragmented) {
6569                         ri->fragmented = false;
6570                         sw_idx = NEXT_TX(sw_idx);
6571                         ri = &tnapi->tx_buffers[sw_idx];
6572                 }
6573
6574                 sw_idx = NEXT_TX(sw_idx);
6575
6576                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6577                         ri = &tnapi->tx_buffers[sw_idx];
                             /* A frag slot holding an skb, or running past
                              * hw_idx, means the rings are inconsistent.
                              */
6578                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6579                                 tx_bug = 1;
6580
6581                         pci_unmap_page(tp->pdev,
6582                                        dma_unmap_addr(ri, mapping),
6583                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6584                                        PCI_DMA_TODEVICE);
6585
6586                         while (ri->fragmented) {
6587                                 ri->fragmented = false;
6588                                 sw_idx = NEXT_TX(sw_idx);
6589                                 ri = &tnapi->tx_buffers[sw_idx];
6590                         }
6591
6592                         sw_idx = NEXT_TX(sw_idx);
6593                 }
6594
6595                 pkts_compl++;
6596                 bytes_compl += skb->len;
6597
6598                 dev_consume_skb_any(skb);
6599
6600                 if (unlikely(tx_bug)) {
6601                         tg3_tx_recover(tp);
6602                         return;
6603                 }
6604         }
6605
             /* Report completed work to byte queue limits. */
6606         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6607
6608         tnapi->tx_cons = sw_idx;
6609
6610         /* Need to make the tx_cons update visible to tg3_start_xmit()
6611          * before checking for netif_queue_stopped().  Without the
6612          * memory barrier, there is a small possibility that tg3_start_xmit()
6613          * will miss it and cause the queue to be stopped forever.
6614          */
6615         smp_mb();
6616
             /* Re-check under the tx lock to close the race with a
              * concurrent tg3_start_xmit() stopping the queue.
              */
6617         if (unlikely(netif_tx_queue_stopped(txq) &&
6618                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6619                 __netif_tx_lock(txq, smp_processor_id());
6620                 if (netif_tx_queue_stopped(txq) &&
6621                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6622                         netif_tx_wake_queue(txq);
6623                 __netif_tx_unlock(txq);
6624         }
6625 }
6626
6627 static void tg3_frag_free(bool is_frag, void *data)
6628 {
6629         if (is_frag)
6630                 skb_free_frag(data);
6631         else
6632                 kfree(data);
6633 }
6634
6635 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6636 {
6637         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6638                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6639
6640         if (!ri->data)
6641                 return;
6642
6643         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6644                          map_sz, PCI_DMA_FROMDEVICE);
6645         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6646         ri->data = NULL;
6647 }
6648
6649
6650 /* Returns size of skb allocated or < 0 on error.
6651  *
6652  * We only need to fill in the address because the other members
6653  * of the RX descriptor are invariant, see tg3_init_rings.
6654  *
6655  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6656  * posting buffers we only dirty the first cache line of the RX
6657  * descriptor (containing the address).  Whereas for the RX status
6658  * buffers the cpu only reads the last cacheline of the RX descriptor
6659  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6660  */
     /* On success *frag_size is the fragment allocation size, or 0 when
      * the buffer came from kmalloc(); the caller passes this to
      * build_skb()/tg3_frag_free() to select the matching free path.
      */
6661 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6662                              u32 opaque_key, u32 dest_idx_unmasked,
6663                              unsigned int *frag_size)
6664 {
6665         struct tg3_rx_buffer_desc *desc;
6666         struct ring_info *map;
6667         u8 *data;
6668         dma_addr_t mapping;
6669         int skb_size, data_size, dest_idx;
6670
             /* Pick the ring (standard or jumbo) named by the opaque key. */
6671         switch (opaque_key) {
6672         case RXD_OPAQUE_RING_STD:
6673                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6674                 desc = &tpr->rx_std[dest_idx];
6675                 map = &tpr->rx_std_buffers[dest_idx];
6676                 data_size = tp->rx_pkt_map_sz;
6677                 break;
6678
6679         case RXD_OPAQUE_RING_JUMBO:
6680                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6681                 desc = &tpr->rx_jmb[dest_idx].std;
6682                 map = &tpr->rx_jmb_buffers[dest_idx];
6683                 data_size = TG3_RX_JMB_MAP_SZ;
6684                 break;
6685
6686         default:
6687                 return -EINVAL;
6688         }
6689
6690         /* Do not overwrite any of the map or rp information
6691          * until we are sure we can commit to a new buffer.
6692          *
6693          * Callers depend upon this behavior and assume that
6694          * we leave everything unchanged if we fail.
6695          */
6696         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6697                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
             /* Small buffers come from the page fragment allocator;
              * larger ones fall back to kmalloc().
              */
6698         if (skb_size <= PAGE_SIZE) {
6699                 data = netdev_alloc_frag(skb_size);
6700                 *frag_size = skb_size;
6701         } else {
6702                 data = kmalloc(skb_size, GFP_ATOMIC);
6703                 *frag_size = 0;
6704         }
6705         if (!data)
6706                 return -ENOMEM;
6707
6708         mapping = pci_map_single(tp->pdev,
6709                                  data + TG3_RX_OFFSET(tp),
6710                                  data_size,
6711                                  PCI_DMA_FROMDEVICE);
             /* Mapping failed: release the buffer and leave the ring slot
              * untouched, per the contract above.
              */
6712         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6713                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6714                 return -EIO;
6715         }
6716
6717         map->data = data;
6718         dma_unmap_addr_set(map, mapping, mapping);
6719
6720         desc->addr_hi = ((u64)mapping >> 32);
6721         desc->addr_lo = ((u64)mapping & 0xffffffff);
6722
6723         return data_size;
6724 }
6725
6726 /* We only need to move over in the address because the other
6727  * members of the RX descriptor are invariant.  See notes above
6728  * tg3_alloc_rx_data for full details.
6729  */
6730 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6731                            struct tg3_rx_prodring_set *dpr,
6732                            u32 opaque_key, int src_idx,
6733                            u32 dest_idx_unmasked)
6734 {
6735         struct tg3 *tp = tnapi->tp;
6736         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6737         struct ring_info *src_map, *dest_map;
             /* Source buffers always live in vector 0's producer ring set. */
6738         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6739         int dest_idx;
6740
6741         switch (opaque_key) {
6742         case RXD_OPAQUE_RING_STD:
6743                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6744                 dest_desc = &dpr->rx_std[dest_idx];
6745                 dest_map = &dpr->rx_std_buffers[dest_idx];
6746                 src_desc = &spr->rx_std[src_idx];
6747                 src_map = &spr->rx_std_buffers[src_idx];
6748                 break;
6749
6750         case RXD_OPAQUE_RING_JUMBO:
6751                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6752                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6753                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6754                 src_desc = &spr->rx_jmb[src_idx].std;
6755                 src_map = &spr->rx_jmb_buffers[src_idx];
6756                 break;
6757
6758         default:
6759                 return;
6760         }
6761
             /* Re-post the source buffer and its DMA mapping at the
              * destination slot instead of allocating a fresh buffer.
              */
6762         dest_map->data = src_map->data;
6763         dma_unmap_addr_set(dest_map, mapping,
6764                            dma_unmap_addr(src_map, mapping));
6765         dest_desc->addr_hi = src_desc->addr_hi;
6766         dest_desc->addr_lo = src_desc->addr_lo;
6767
6768         /* Ensure that the update to the skb happens after the physical
6769          * addresses have been transferred to the new BD location.
6770          */
6771         smp_wmb();
6772
6773         src_map->data = NULL;
6774 }
6775
6776 /* The RX ring scheme is composed of multiple rings which post fresh
6777  * buffers to the chip, and one special ring the chip uses to report
6778  * status back to the host.
6779  *
6780  * The special ring reports the status of received packets to the
6781  * host.  The chip does not write into the original descriptor the
6782  * RX buffer was obtained from.  The chip simply takes the original
6783  * descriptor as provided by the host, updates the status and length
6784  * field, then writes this into the next status ring entry.
6785  *
6786  * Each ring the host uses to post buffers to the chip is described
6787  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6788  * it is first placed into the on-chip ram.  When the packet's length
6789  * is known, it walks down the TG3_BDINFO entries to select the ring.
6790  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6791  * which is within the range of the new packet's length is chosen.
6792  *
6793  * The "separate ring for rx status" scheme may sound queer, but it makes
6794  * sense from a cache coherency perspective.  If only the host writes
6795  * to the buffer post rings, and only the chip writes to the rx status
6796  * rings, then cache lines never move beyond shared-modified state.
6797  * If both the host and chip were to write into the same ring, cache line
6798  * eviction could occur since both entities want it in an exclusive state.
6799  */
     /* NAPI rx handler for one vector.  Processes up to 'budget' status
      * ring entries and returns the number of packets delivered.
      */
6800 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6801 {
6802         struct tg3 *tp = tnapi->tp;
6803         u32 work_mask, rx_std_posted = 0;
6804         u32 std_prod_idx, jmb_prod_idx;
6805         u32 sw_idx = tnapi->rx_rcb_ptr;
6806         u16 hw_idx;
6807         int received;
6808         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6809
6810         hw_idx = *(tnapi->rx_rcb_prod_idx);
6811         /*
6812          * We need to order the read of hw_idx and the read of
6813          * the opaque cookie.
6814          */
6815         rmb();
6816         work_mask = 0;
6817         received = 0;
6818         std_prod_idx = tpr->rx_std_prod_idx;
6819         jmb_prod_idx = tpr->rx_jmb_prod_idx;
             /* Walk the status ring until we catch up with the hardware
              * producer index or exhaust the NAPI budget.
              */
6820         while (sw_idx != hw_idx && budget > 0) {
6821                 struct ring_info *ri;
6822                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6823                 unsigned int len;
6824                 struct sk_buff *skb;
6825                 dma_addr_t dma_addr;
6826                 u32 opaque_key, desc_idx, *post_ptr;
6827                 u8 *data;
6828                 u64 tstamp = 0;
6829
                     /* The opaque cookie tells us which producer ring and
                      * slot the buffer came from.
                      */
6830                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6831                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6832                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6833                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6834                         dma_addr = dma_unmap_addr(ri, mapping);
6835                         data = ri->data;
6836                         post_ptr = &std_prod_idx;
6837                         rx_std_posted++;
6838                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6839                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6840                         dma_addr = dma_unmap_addr(ri, mapping);
6841                         data = ri->data;
6842                         post_ptr = &jmb_prod_idx;
6843                 } else
6844                         goto next_pkt_nopost;
6845
6846                 work_mask |= opaque_key;
6847
6848                 if (desc->err_vlan & RXD_ERR_MASK) {
6849                 drop_it:
6850                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6851                                        desc_idx, *post_ptr);
6852                 drop_it_no_recycle:
6853                         /* Other statistics kept track of by card. */
6854                         tp->rx_dropped++;
6855                         goto next_pkt;
6856                 }
6857
6858                 prefetch(data + TG3_RX_OFFSET(tp));
6859                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6860                       ETH_FCS_LEN;
6861
                     /* Latch the hardware rx timestamp for PTP frames. */
6862                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6863                      RXD_FLAG_PTPSTAT_PTPV1 ||
6864                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6865                      RXD_FLAG_PTPSTAT_PTPV2) {
6866                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6867                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6868                 }
6869
                     /* Large packet: hand the existing buffer to the stack
                      * and post a freshly allocated replacement.  Small
                      * packet: copy into a new skb and recycle the buffer.
                      */
6870                 if (len > TG3_RX_COPY_THRESH(tp)) {
6871                         int skb_size;
6872                         unsigned int frag_size;
6873
6874                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6875                                                     *post_ptr, &frag_size);
6876                         if (skb_size < 0)
6877                                 goto drop_it;
6878
6879                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6880                                          PCI_DMA_FROMDEVICE);
6881
6882                         /* Ensure that the update to the data happens
6883                          * after the usage of the old DMA mapping.
6884                          */
6885                         smp_wmb();
6886
6887                         ri->data = NULL;
6888
6889                         skb = build_skb(data, frag_size);
6890                         if (!skb) {
6891                                 tg3_frag_free(frag_size != 0, data);
6892                                 goto drop_it_no_recycle;
6893                         }
6894                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6895                 } else {
6896                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6897                                        desc_idx, *post_ptr);
6898
6899                         skb = netdev_alloc_skb(tp->dev,
6900                                                len + TG3_RAW_IP_ALIGN);
6901                         if (skb == NULL)
6902                                 goto drop_it_no_recycle;
6903
6904                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6905                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6906                         memcpy(skb->data,
6907                                data + TG3_RX_OFFSET(tp),
6908                                len);
6909                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6910                 }
6911
6912                 skb_put(skb, len);
6913                 if (tstamp)
6914                         tg3_hwclock_to_timestamp(tp, tstamp,
6915                                                  skb_hwtstamps(skb));
6916
                     /* Trust the hardware checksum only when the chip
                      * reports a complete TCP/UDP checksum of 0xffff.
                      */
6917                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6918                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6919                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6920                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6921                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6922                 else
6923                         skb_checksum_none_assert(skb);
6924
6925                 skb->protocol = eth_type_trans(skb, tp->dev);
6926
                     /* Drop oversized frames unless they are VLAN-tagged. */
6927                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6928                     skb->protocol != htons(ETH_P_8021Q) &&
6929                     skb->protocol != htons(ETH_P_8021AD)) {
6930                         dev_kfree_skb_any(skb);
6931                         goto drop_it_no_recycle;
6932                 }
6933
6934                 if (desc->type_flags & RXD_FLAG_VLAN &&
6935                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6936                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6937                                                desc->err_vlan & RXD_VLAN_MASK);
6938
6939                 napi_gro_receive(&tnapi->napi, skb);
6940
6941                 received++;
6942                 budget--;
6943
6944 next_pkt:
6945                 (*post_ptr)++;
6946
                     /* If many std buffers were consumed, refill the chip
                      * mid-loop so it does not run dry while we keep polling.
                      */
6947                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6948                         tpr->rx_std_prod_idx = std_prod_idx &
6949                                                tp->rx_std_ring_mask;
6950                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6951                                      tpr->rx_std_prod_idx);
6952                         work_mask &= ~RXD_OPAQUE_RING_STD;
6953                         rx_std_posted = 0;
6954                 }
6955 next_pkt_nopost:
6956                 sw_idx++;
6957                 sw_idx &= tp->rx_ret_ring_mask;
6958
6959                 /* Refresh hw_idx to see if there is new work */
6960                 if (sw_idx == hw_idx) {
6961                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6962                         rmb();
6963                 }
6964         }
6965
6966         /* ACK the status ring. */
6967         tnapi->rx_rcb_ptr = sw_idx;
6968         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6969
6970         /* Refill RX ring(s). */
6971         if (!tg3_flag(tp, ENABLE_RSS)) {
6972                 /* Sync BD data before updating mailbox */
6973                 wmb();
6974
6975                 if (work_mask & RXD_OPAQUE_RING_STD) {
6976                         tpr->rx_std_prod_idx = std_prod_idx &
6977                                                tp->rx_std_ring_mask;
6978                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6979                                      tpr->rx_std_prod_idx);
6980                 }
6981                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6982                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6983                                                tp->rx_jmb_ring_mask;
6984                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6985                                      tpr->rx_jmb_prod_idx);
6986                 }
6987                 mmiowb();
6988         } else if (work_mask) {
6989                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6990                  * updated before the producer indices can be updated.
6991                  */
6992                 smp_wmb();
6993
6994                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6995                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6996
                     /* With RSS, vector 1 owns the actual refill; poke it. */
6997                 if (tnapi != &tp->napi[1]) {
6998                         tp->rx_refill = true;
6999                         napi_schedule(&tp->napi[1].napi);
7000                 }
7001         }
7002
7003         return received;
7004 }
7005
     /* Handle a link-change event reported through vector 0's status
      * block, unless link state is tracked by other means (link-change
      * register polling or serdes polling).
      */
7006 static void tg3_poll_link(struct tg3 *tp)
7007 {
7008         /* handle link change and other phy events */
7009         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7010                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7011
7012                 if (sblk->status & SD_STATUS_LINK_CHG) {
                             /* Clear the link-change bit but keep the rest of
                              * the status word, marking the block updated.
                              */
7013                         sblk->status = SD_STATUS_UPDATED |
7014                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7015                         spin_lock(&tp->lock);
7016                         if (tg3_flag(tp, USE_PHYLIB)) {
                                     /* phylib owns the PHY: just ack the MAC
                                      * status bits.
                                      */
7017                                 tw32_f(MAC_STATUS,
7018                                      (MAC_STATUS_SYNC_CHANGED |
7019                                       MAC_STATUS_CFG_CHANGED |
7020                                       MAC_STATUS_MI_COMPLETION |
7021                                       MAC_STATUS_LNKSTATE_CHANGED));
7022                                 udelay(40);
7023                         } else
7024                                 tg3_setup_phy(tp, false);
7025                         spin_unlock(&tp->lock);
7026                 }
7027         }
7028 }
7029
     /* Transfer recycled rx buffers from a source producer ring set
      * (spr, a per-vector ring) into the destination set (dpr, the ring
      * the chip is actually fed from).  Both the standard and jumbo
      * rings are drained.  Returns 0 on success or -ENOSPC if a
      * destination slot was still occupied.
      */
7030 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7031                                 struct tg3_rx_prodring_set *dpr,
7032                                 struct tg3_rx_prodring_set *spr)
7033 {
7034         u32 si, di, cpycnt, src_prod_idx;
7035         int i, err = 0;
7036
             /* Drain the standard ring. */
7037         while (1) {
7038                 src_prod_idx = spr->rx_std_prod_idx;
7039
7040                 /* Make sure updates to the rx_std_buffers[] entries and the
7041                  * standard producer index are seen in the correct order.
7042                  */
7043                 smp_rmb();
7044
7045                 if (spr->rx_std_cons_idx == src_prod_idx)
7046                         break;
7047
                     /* Copy in one contiguous run; a wrap around the ring end
                      * is handled by the next loop iteration.
                      */
7048                 if (spr->rx_std_cons_idx < src_prod_idx)
7049                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7050                 else
7051                         cpycnt = tp->rx_std_ring_mask + 1 -
7052                                  spr->rx_std_cons_idx;
7053
7054                 cpycnt = min(cpycnt,
7055                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7056
7057                 si = spr->rx_std_cons_idx;
7058                 di = dpr->rx_std_prod_idx;
7059
                     /* Stop short if any destination slot is still in use. */
7060                 for (i = di; i < di + cpycnt; i++) {
7061                         if (dpr->rx_std_buffers[i].data) {
7062                                 cpycnt = i - di;
7063                                 err = -ENOSPC;
7064                                 break;
7065                         }
7066                 }
7067
7068                 if (!cpycnt)
7069                         break;
7070
7071                 /* Ensure that updates to the rx_std_buffers ring and the
7072                  * shadowed hardware producer ring from tg3_recycle_skb() are
7073                  * ordered correctly WRT the skb check above.
7074                  */
7075                 smp_rmb();
7076
7077                 memcpy(&dpr->rx_std_buffers[di],
7078                        &spr->rx_std_buffers[si],
7079                        cpycnt * sizeof(struct ring_info));
7080
7081                 for (i = 0; i < cpycnt; i++, di++, si++) {
7082                         struct tg3_rx_buffer_desc *sbd, *dbd;
7083                         sbd = &spr->rx_std[si];
7084                         dbd = &dpr->rx_std[di];
7085                         dbd->addr_hi = sbd->addr_hi;
7086                         dbd->addr_lo = sbd->addr_lo;
7087                 }
7088
7089                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7090                                        tp->rx_std_ring_mask;
7091                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7092                                        tp->rx_std_ring_mask;
7093         }
7094
             /* Drain the jumbo ring, same scheme as above. */
7095         while (1) {
7096                 src_prod_idx = spr->rx_jmb_prod_idx;
7097
7098                 /* Make sure updates to the rx_jmb_buffers[] entries and
7099                  * the jumbo producer index are seen in the correct order.
7100                  */
7101                 smp_rmb();
7102
7103                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7104                         break;
7105
7106                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7107                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7108                 else
7109                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7110                                  spr->rx_jmb_cons_idx;
7111
7112                 cpycnt = min(cpycnt,
7113                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7114
7115                 si = spr->rx_jmb_cons_idx;
7116                 di = dpr->rx_jmb_prod_idx;
7117
7118                 for (i = di; i < di + cpycnt; i++) {
7119                         if (dpr->rx_jmb_buffers[i].data) {
7120                                 cpycnt = i - di;
7121                                 err = -ENOSPC;
7122                                 break;
7123                         }
7124                 }
7125
7126                 if (!cpycnt)
7127                         break;
7128
7129                 /* Ensure that updates to the rx_jmb_buffers ring and the
7130                  * shadowed hardware producer ring from tg3_recycle_skb() are
7131                  * ordered correctly WRT the skb check above.
7132                  */
7133                 smp_rmb();
7134
7135                 memcpy(&dpr->rx_jmb_buffers[di],
7136                        &spr->rx_jmb_buffers[si],
7137                        cpycnt * sizeof(struct ring_info));
7138
7139                 for (i = 0; i < cpycnt; i++, di++, si++) {
7140                         struct tg3_rx_buffer_desc *sbd, *dbd;
7141                         sbd = &spr->rx_jmb[si].std;
7142                         dbd = &dpr->rx_jmb[di].std;
7143                         dbd->addr_hi = sbd->addr_hi;
7144                         dbd->addr_lo = sbd->addr_lo;
7145                 }
7146
7147                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7148                                        tp->rx_jmb_ring_mask;
7149                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7150                                        tp->rx_jmb_ring_mask;
7151         }
7152
7153         return err;
7154 }
7155
     /* One unit of NAPI poll work for this vector: reclaim completed tx,
      * receive rx within the remaining budget, and -- on the RSS master
      * vector (napi[1]) -- funnel recycled rx buffers from all vectors
      * back into napi[0]'s producer rings.  Returns the updated
      * work_done count.
      */
7156 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7157 {
7158         struct tg3 *tp = tnapi->tp;
7159
7160         /* run TX completion thread */
7161         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7162                 tg3_tx(tnapi);
7163                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7164                         return work_done;
7165         }
7166
             /* Vector has no rx return ring; tx-only work is done. */
7167         if (!tnapi->rx_rcb_prod_idx)
7168                 return work_done;
7169
7170         /* run RX thread, within the bounds set by NAPI.
7171          * All RX "locking" is done by ensuring outside
7172          * code synchronizes with tg3->napi.poll()
7173          */
7174         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7175                 work_done += tg3_rx(tnapi, budget - work_done);
7176
7177         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7178                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7179                 int i, err = 0;
7180                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7181                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7182
7183                 tp->rx_refill = false;
                     /* Gather recycled buffers from every rx vector. */
7184                 for (i = 1; i <= tp->rxq_cnt; i++)
7185                         err |= tg3_rx_prodring_xfer(tp, dpr,
7186                                                     &tp->napi[i].prodring);
7187
                     /* Publish BD updates before ringing the mailboxes. */
7188                 wmb();
7189
7190                 if (std_prod_idx != dpr->rx_std_prod_idx)
7191                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7192                                      dpr->rx_std_prod_idx);
7193
7194                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7195                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7196                                      dpr->rx_jmb_prod_idx);
7197
7198                 mmiowb();
7199
                     /* A transfer failed (-ENOSPC); force another interrupt
                      * so the rings get another chance to drain.
                      */
7200                 if (err)
7201                         tw32_f(HOSTCC_MODE, tp->coal_now);
7202         }
7203
7204         return work_done;
7205 }
7206
7207 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7208 {
7209         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7210                 schedule_work(&tp->reset_task);
7211 }
7212
/* Cancel a pending reset task and wait for a running one to finish.
 * The flags are cleared only after cancel_work_sync() returns so that
 * a new reset may be scheduled afterwards.  Must not be called from
 * the reset task itself (cancel_work_sync() would deadlock).
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
7219
/* NAPI poll handler for the MSI-X vectors (napi[1..n]).  These vectors
 * always use tagged status blocks: interrupts are re-enabled by writing
 * the last processed status tag to the vector's interrupt mailbox.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7280
/* Examine the hardware error-status registers when the status block
 * reports SD_STATUS_ERROR, and schedule a chip reset if a genuine error
 * is present.  The ERROR_PROCESSED flag ensures a single reset is
 * scheduled per error event even though this runs from the poll loop.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	/* MBUF low-watermark attention alone is not treated as fatal. */
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
7314
/* NAPI poll handler for vector 0 (INTx/MSI or the first MSI-X vector).
 * Unlike tg3_poll_msix() it also checks for chip error status and link
 * changes, and supports both tagged and non-tagged status blocks.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7363
7364 static void tg3_napi_disable(struct tg3 *tp)
7365 {
7366         int i;
7367
7368         for (i = tp->irq_cnt - 1; i >= 0; i--)
7369                 napi_disable(&tp->napi[i].napi);
7370 }
7371
7372 static void tg3_napi_enable(struct tg3 *tp)
7373 {
7374         int i;
7375
7376         for (i = 0; i < tp->irq_cnt; i++)
7377                 napi_enable(&tp->napi[i].napi);
7378 }
7379
7380 static void tg3_napi_init(struct tg3 *tp)
7381 {
7382         int i;
7383
7384         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7385         for (i = 1; i < tp->irq_cnt; i++)
7386                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7387 }
7388
7389 static void tg3_napi_fini(struct tg3 *tp)
7390 {
7391         int i;
7392
7393         for (i = 0; i < tp->irq_cnt; i++)
7394                 netif_napi_del(&tp->napi[i].napi);
7395 }
7396
/* Quiesce the interface: refresh the TX watchdog timestamp, stop NAPI
 * polling, drop the carrier and disable all TX queues.  Counterpart of
 * tg3_netif_start().
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7404
/* tp->lock must be held.
 * Restart the interface after tg3_netif_stop(): resume PTP, wake the TX
 * queues, restore carrier state, re-enable NAPI and then interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Force one pass through the poll loop once interrupts are on. */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7423
/* Wait for all in-flight interrupt handlers to finish.  Sets irq_sync
 * (observed by tg3_irq_sync() in the handlers) and temporarily drops
 * tp->lock because synchronize_irq() may sleep/spin on handlers that
 * would otherwise need the lock.  Re-acquires tp->lock before return.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible to the IRQ handlers before waiting. */
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}
7442
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7454
/* Release the device lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7459
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll handler will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() is draining handlers. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7477
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll handler will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	/* Skip scheduling while tg3_irq_quiesce() is draining handlers. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7503
/* Legacy INTx interrupt handler (non-tagged status blocks).  The line
 * may be shared, so we must decide whether the interrupt is ours and
 * report IRQ_NONE otherwise.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7552
/* Legacy INTx interrupt handler for chips using tagged status blocks.
 * Ownership of the interrupt is decided by comparing the status tag
 * with the last tag we acknowledged rather than SD_STATUS_UPDATED.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7604
7605 /* ISR for interrupt test */
7606 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7607 {
7608         struct tg3_napi *tnapi = dev_id;
7609         struct tg3 *tp = tnapi->tp;
7610         struct tg3_hw_status *sblk = tnapi->hw_status;
7611
7612         if ((sblk->status & SD_STATUS_UPDATED) ||
7613             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7614                 tg3_disable_ints(tp);
7615                 return IRQ_RETVAL(1);
7616         }
7617         return IRQ_RETVAL(0);
7618 }
7619
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: emulate an interrupt on every vector so the stack can
 * make progress with interrupts disabled (e.g. netconsole, kgdboe).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Don't touch the chip while tg3_irq_quiesce() is in progress. */
	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
7633
/* ndo_tx_timeout handler: the stack detected a stalled TX queue, so
 * log state (if TX error messages are enabled) and schedule a reset.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7645
7646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7648 {
7649         u32 base = (u32) mapping & 0xffffffff;
7650
7651         return base + len + 8 < base;
7652 }
7653
7654 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7655  * of any 4GB boundaries: 4G, 8G, etc
7656  */
7657 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7658                                            u32 len, u32 mss)
7659 {
7660         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7661                 u32 base = (u32) mapping & 0xffffffff;
7662
7663                 return ((base + len + (mss & 0x3fff)) < base);
7664         }
7665         return 0;
7666 }
7667
7668 /* Test for DMA addresses > 40-bit */
7669 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7670                                           int len)
7671 {
7672 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7673         if (tg3_flag(tp, 40BIT_DMA_BUG))
7674                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7675         return 0;
7676 #else
7677         return 0;
7678 #endif
7679 }
7680
7681 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7682                                  dma_addr_t mapping, u32 len, u32 flags,
7683                                  u32 mss, u32 vlan)
7684 {
7685         txbd->addr_hi = ((u64) mapping >> 32);
7686         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7687         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7688         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7689 }
7690
/* Queue one DMA segment starting at ring slot *entry, splitting it into
 * several descriptors when it exceeds tp->dma_limit.  *entry is advanced
 * and *budget decremented for each descriptor written.  Returns true if
 * the mapping trips a hardware DMA erratum (or the descriptor budget is
 * exhausted), in which case the caller must fall back to
 * tigon3_dma_hwbug_workaround().
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Some chips cannot DMA segments of 8 bytes or less. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		/* Split oversized segments; only the final descriptor may
		 * carry TXD_FLAG_END.
		 */
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark the slot so tg3_tx_skb_unmap() knows this
			 * descriptor is a continuation, not a new mapping.
			 */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors: undo the continuation
				 * mark on the last slot we wrote and report
				 * failure.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7753
/* Unmap the DMA mappings of the skb occupying TX ring slot @entry and
 * clear its slot bookkeeping.  @last is the index of the last skb
 * fragment to unmap, or -1 when only the linear head was mapped.
 * Continuation slots marked "fragmented" by tg3_tx_frag_set() share
 * one mapping and are skipped (only their marker is cleared).
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Unmap the linear portion of the skb. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Step over continuation descriptors of the head mapping. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Step over continuation descriptors of this fragment. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7791
/* Workaround 4GB and 40-bit hardware DMA bugs.
 * Linearize @*pskb into a freshly allocated skb (with extra headroom
 * for 4-byte alignment on 5701) and queue it via tg3_tx_frag_set().
 * On success *pskb points at the new skb and the original is consumed;
 * returns 0 on success, -1 on allocation/mapping/queueing failure (the
 * original skb is consumed either way and *pskb may be NULL).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 requires 4-byte aligned TX buffers; pad headroom
		 * so the copied data lands on a 4-byte boundary.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			/* Even the linear copy can still trip a DMA bug;
			 * if so, unwind the descriptors we just wrote.
			 */
			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
7846
7847 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7848 {
7849         /* Check if we will never have enough descriptors,
7850          * as gso_segs can be more than current ring size
7851          */
7852         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7853 }
7854
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set().  The packet is software-segmented
 * and each resulting MTU-sized skb is resubmitted via tg3_start_xmit().
 * Always returns NETDEV_TX_OK (consuming @skb) unless the ring lacks
 * room for the worst-case segment count, in which case the queue is
 * stopped and NETDEV_TX_BUSY returned without consuming @skb.
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	/* Worst case: every segment needs ~3 descriptors. */
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	/* Segment in software with TSO features masked off. */
	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
7899
7900 /* hard_start_xmit for all devices */
7901 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7902 {
7903         struct tg3 *tp = netdev_priv(dev);
7904         u32 len, entry, base_flags, mss, vlan = 0;
7905         u32 budget;
7906         int i = -1, would_hit_hwbug;
7907         dma_addr_t mapping;
7908         struct tg3_napi *tnapi;
7909         struct netdev_queue *txq;
7910         unsigned int last;
7911         struct iphdr *iph = NULL;
7912         struct tcphdr *tcph = NULL;
7913         __sum16 tcp_csum = 0, ip_csum = 0;
7914         __be16 ip_tot_len = 0;
7915
7916         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7917         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7918         if (tg3_flag(tp, ENABLE_TSS))
7919                 tnapi++;
7920
7921         budget = tg3_tx_avail(tnapi);
7922
7923         /* We are running in BH disabled context with netif_tx_lock
7924          * and TX reclaim runs via tp->napi.poll inside of a software
7925          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7926          * no IRQ context deadlocks to worry about either.  Rejoice!
7927          */
7928         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7929                 if (!netif_tx_queue_stopped(txq)) {
7930                         netif_tx_stop_queue(txq);
7931
7932                         /* This is a hard error, log it. */
7933                         netdev_err(dev,
7934                                    "BUG! Tx Ring full when queue awake!\n");
7935                 }
7936                 return NETDEV_TX_BUSY;
7937         }
7938
7939         entry = tnapi->tx_prod;
7940         base_flags = 0;
7941
7942         mss = skb_shinfo(skb)->gso_size;
7943         if (mss) {
7944                 u32 tcp_opt_len, hdr_len;
7945
7946                 if (skb_cow_head(skb, 0))
7947                         goto drop;
7948
7949                 iph = ip_hdr(skb);
7950                 tcp_opt_len = tcp_optlen(skb);
7951
7952                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7953
7954                 /* HW/FW can not correctly segment packets that have been
7955                  * vlan encapsulated.
7956                  */
7957                 if (skb->protocol == htons(ETH_P_8021Q) ||
7958                     skb->protocol == htons(ETH_P_8021AD)) {
7959                         if (tg3_tso_bug_gso_check(tnapi, skb))
7960                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7961                         goto drop;
7962                 }
7963
7964                 if (!skb_is_gso_v6(skb)) {
7965                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7966                             tg3_flag(tp, TSO_BUG)) {
7967                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7968                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7969                                 goto drop;
7970                         }
7971                         ip_csum = iph->check;
7972                         ip_tot_len = iph->tot_len;
7973                         iph->check = 0;
7974                         iph->tot_len = htons(mss + hdr_len);
7975                 }
7976
7977                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7978                                TXD_FLAG_CPU_POST_DMA);
7979
7980                 tcph = tcp_hdr(skb);
7981                 tcp_csum = tcph->check;
7982
7983                 if (tg3_flag(tp, HW_TSO_1) ||
7984                     tg3_flag(tp, HW_TSO_2) ||
7985                     tg3_flag(tp, HW_TSO_3)) {
7986                         tcph->check = 0;
7987                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7988                 } else {
7989                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7990                                                          0, IPPROTO_TCP, 0);
7991                 }
7992
7993                 if (tg3_flag(tp, HW_TSO_3)) {
7994                         mss |= (hdr_len & 0xc) << 12;
7995                         if (hdr_len & 0x10)
7996                                 base_flags |= 0x00000010;
7997                         base_flags |= (hdr_len & 0x3e0) << 5;
7998                 } else if (tg3_flag(tp, HW_TSO_2))
7999                         mss |= hdr_len << 9;
8000                 else if (tg3_flag(tp, HW_TSO_1) ||
8001                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8002                         if (tcp_opt_len || iph->ihl > 5) {
8003                                 int tsflags;
8004
8005                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8006                                 mss |= (tsflags << 11);
8007                         }
8008                 } else {
8009                         if (tcp_opt_len || iph->ihl > 5) {
8010                                 int tsflags;
8011
8012                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8013                                 base_flags |= tsflags << 12;
8014                         }
8015                 }
8016         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8017                 /* HW/FW can not correctly checksum packets that have been
8018                  * vlan encapsulated.
8019                  */
8020                 if (skb->protocol == htons(ETH_P_8021Q) ||
8021                     skb->protocol == htons(ETH_P_8021AD)) {
8022                         if (skb_checksum_help(skb))
8023                                 goto drop;
8024                 } else  {
8025                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8026                 }
8027         }
8028
8029         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8030             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8031                 base_flags |= TXD_FLAG_JMB_PKT;
8032
8033         if (skb_vlan_tag_present(skb)) {
8034                 base_flags |= TXD_FLAG_VLAN;
8035                 vlan = skb_vlan_tag_get(skb);
8036         }
8037
8038         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8039             tg3_flag(tp, TX_TSTAMP_EN)) {
8040                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8041                 base_flags |= TXD_FLAG_HWTSTAMP;
8042         }
8043
8044         len = skb_headlen(skb);
8045
8046         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8047         if (pci_dma_mapping_error(tp->pdev, mapping))
8048                 goto drop;
8049
8050
8051         tnapi->tx_buffers[entry].skb = skb;
8052         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8053
8054         would_hit_hwbug = 0;
8055
8056         if (tg3_flag(tp, 5701_DMA_BUG))
8057                 would_hit_hwbug = 1;
8058
8059         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8060                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8061                             mss, vlan)) {
8062                 would_hit_hwbug = 1;
8063         } else if (skb_shinfo(skb)->nr_frags > 0) {
8064                 u32 tmp_mss = mss;
8065
8066                 if (!tg3_flag(tp, HW_TSO_1) &&
8067                     !tg3_flag(tp, HW_TSO_2) &&
8068                     !tg3_flag(tp, HW_TSO_3))
8069                         tmp_mss = 0;
8070
8071                 /* Now loop through additional data
8072                  * fragments, and queue them.
8073                  */
8074                 last = skb_shinfo(skb)->nr_frags - 1;
8075                 for (i = 0; i <= last; i++) {
8076                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8077
8078                         len = skb_frag_size(frag);
8079                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8080                                                    len, DMA_TO_DEVICE);
8081
8082                         tnapi->tx_buffers[entry].skb = NULL;
8083                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8084                                            mapping);
8085                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8086                                 goto dma_error;
8087
8088                         if (!budget ||
8089                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8090                                             len, base_flags |
8091                                             ((i == last) ? TXD_FLAG_END : 0),
8092                                             tmp_mss, vlan)) {
8093                                 would_hit_hwbug = 1;
8094                                 break;
8095                         }
8096                 }
8097         }
8098
8099         if (would_hit_hwbug) {
8100                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8101
8102                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8103                         /* If it's a TSO packet, do GSO instead of
8104                          * allocating and copying to a large linear SKB
8105                          */
8106                         if (ip_tot_len) {
8107                                 iph->check = ip_csum;
8108                                 iph->tot_len = ip_tot_len;
8109                         }
8110                         tcph->check = tcp_csum;
8111                         return tg3_tso_bug(tp, tnapi, txq, skb);
8112                 }
8113
8114                 /* If the workaround fails due to memory/mapping
8115                  * failure, silently drop this packet.
8116                  */
8117                 entry = tnapi->tx_prod;
8118                 budget = tg3_tx_avail(tnapi);
8119                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8120                                                 base_flags, mss, vlan))
8121                         goto drop_nofree;
8122         }
8123
8124         skb_tx_timestamp(skb);
8125         netdev_tx_sent_queue(txq, skb->len);
8126
8127         /* Sync BD data before updating mailbox */
8128         wmb();
8129
8130         tnapi->tx_prod = entry;
8131         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8132                 netif_tx_stop_queue(txq);
8133
8134                 /* netif_tx_stop_queue() must be done before checking
8135                  * checking tx index in tg3_tx_avail() below, because in
8136                  * tg3_tx(), we update tx index before checking for
8137                  * netif_tx_queue_stopped().
8138                  */
8139                 smp_mb();
8140                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8141                         netif_tx_wake_queue(txq);
8142         }
8143
8144         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8145                 /* Packets are ready, update Tx producer idx on card. */
8146                 tw32_tx_mbox(tnapi->prodmbox, entry);
8147                 mmiowb();
8148         }
8149
8150         return NETDEV_TX_OK;
8151
8152 dma_error:
8153         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8154         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8155 drop:
8156         dev_kfree_skb_any(skb);
8157 drop_nofree:
8158         tp->tx_dropped++;
8159         return NETDEV_TX_OK;
8160 }
8161
8162 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8163 {
8164         if (enable) {
8165                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8166                                   MAC_MODE_PORT_MODE_MASK);
8167
8168                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8169
8170                 if (!tg3_flag(tp, 5705_PLUS))
8171                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8172
8173                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8174                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8175                 else
8176                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8177         } else {
8178                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8179
8180                 if (tg3_flag(tp, 5705_PLUS) ||
8181                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8182                     tg3_asic_rev(tp) == ASIC_REV_5700)
8183                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8184         }
8185
8186         tw32(MAC_MODE, tp->mac_mode);
8187         udelay(40);
8188 }
8189
/* Configure the PHY for loopback operation at @speed.
 *
 * When @extlpbk is true, external loopback is configured via
 * tg3_phy_set_extloopbk(); otherwise BMCR_LOOPBACK (internal PHY
 * loopback) is used.  The MAC port mode is updated to match the final
 * speed.  Returns 0 on success, -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
        u32 val, bmcr, mac_mode, ptest = 0;

        tg3_phy_toggle_apd(tp, false);
        tg3_phy_toggle_automdix(tp, false);

        if (extlpbk && tg3_phy_set_extloopbk(tp))
                return -EIO;

        /* Build the BMCR value for the requested speed; always full duplex. */
        bmcr = BMCR_FULLDPLX;
        switch (speed) {
        case SPEED_10:
                break;
        case SPEED_100:
                bmcr |= BMCR_SPEED100;
                break;
        case SPEED_1000:
        default:
                /* FET PHYs top out at 100 Mbps; cap the speed there. */
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        speed = SPEED_100;
                        bmcr |= BMCR_SPEED100;
                } else {
                        speed = SPEED_1000;
                        bmcr |= BMCR_SPEED1000;
                }
        }

        if (extlpbk) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                        /* Force master mode for external loopback on
                         * gigabit-capable PHYs.
                         */
                        tg3_readphy(tp, MII_CTRL1000, &val);
                        val |= CTL1000_AS_MASTER |
                               CTL1000_ENABLE_MASTER;
                        tg3_writephy(tp, MII_CTRL1000, val);
                } else {
                        /* FET PHYs use the PTEST trim registers instead. */
                        ptest = MII_TG3_FET_PTEST_TRIM_SEL |
                                MII_TG3_FET_PTEST_TRIM_2;
                        tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
                }
        } else
                bmcr |= BMCR_LOOPBACK;

        tg3_writephy(tp, MII_BMCR, bmcr);

        /* The write needs to be flushed for the FETs */
        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tg3_readphy(tp, MII_BMCR, &bmcr);

        udelay(40);

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            tg3_asic_rev(tp) == ASIC_REV_5785) {
                /* 5785 FET parts additionally need TX link/lock forced. */
                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
                             MII_TG3_FET_PTEST_FRC_TX_LINK |
                             MII_TG3_FET_PTEST_FRC_TX_LOCK);

                /* The write needs to be flushed for the AC131 */
                tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
        }

        /* Reset to prevent losing 1st rx packet intermittently */
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            tg3_flag(tp, 5780_CLASS)) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        /* Select the MAC port mode matching the (possibly adjusted) speed. */
        mac_mode = tp->mac_mode &
                   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        if (speed == SPEED_1000)
                mac_mode |= MAC_MODE_PORT_MODE_GMII;
        else
                mac_mode |= MAC_MODE_PORT_MODE_MII;

        if (tg3_asic_rev(tp) == ASIC_REV_5700) {
                u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

                /* Link polarity on the 5700 depends on the attached PHY. */
                if (masked_phy_id == TG3_PHY_ID_BCM5401)
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                else if (masked_phy_id == TG3_PHY_ID_BCM5411)
                        mac_mode |= MAC_MODE_LINK_POLARITY;

                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_LNK3_LED_MODE);
        }

        tw32(MAC_MODE, mac_mode);
        udelay(40);

        return 0;
}
8282
8283 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8284 {
8285         struct tg3 *tp = netdev_priv(dev);
8286
8287         if (features & NETIF_F_LOOPBACK) {
8288                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8289                         return;
8290
8291                 spin_lock_bh(&tp->lock);
8292                 tg3_mac_loopback(tp, true);
8293                 netif_carrier_on(tp->dev);
8294                 spin_unlock_bh(&tp->lock);
8295                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8296         } else {
8297                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8298                         return;
8299
8300                 spin_lock_bh(&tp->lock);
8301                 tg3_mac_loopback(tp, false);
8302                 /* Force link status check */
8303                 tg3_setup_phy(tp, true);
8304                 spin_unlock_bh(&tp->lock);
8305                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8306         }
8307 }
8308
8309 static netdev_features_t tg3_fix_features(struct net_device *dev,
8310         netdev_features_t features)
8311 {
8312         struct tg3 *tp = netdev_priv(dev);
8313
8314         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8315                 features &= ~NETIF_F_ALL_TSO;
8316
8317         return features;
8318 }
8319
8320 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8321 {
8322         netdev_features_t changed = dev->features ^ features;
8323
8324         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8325                 tg3_set_loopback(dev, features);
8326
8327         return 0;
8328 }
8329
/* Free all rx data buffers attached to one producer ring set.
 *
 * For secondary (per-vector) ring sets, only the entries between the
 * consumer and producer indexes are populated, so just that window is
 * walked (with wraparound via the ring mask).  The primary ring set
 * (napi[0]) is fully populated and is freed in its entirety.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        int i;

        if (tpr != &tp->napi[0].prodring) {
                /* Secondary set: free only the cons..prod window. */
                for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
                     i = (i + 1) & tp->rx_std_ring_mask)
                        tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                        tp->rx_pkt_map_sz);

                if (tg3_flag(tp, JUMBO_CAPABLE)) {
                        for (i = tpr->rx_jmb_cons_idx;
                             i != tpr->rx_jmb_prod_idx;
                             i = (i + 1) & tp->rx_jmb_ring_mask) {
                                tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                                TG3_RX_JMB_MAP_SZ);
                        }
                }

                return;
        }

        /* Primary set: every slot may hold a buffer; free them all. */
        for (i = 0; i <= tp->rx_std_ring_mask; i++)
                tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                tp->rx_pkt_map_sz);

        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
                        tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                        TG3_RX_JMB_MAP_SZ);
        }
}
8363
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even a single rx buffer could
 * be allocated.  If only some buffers could be allocated, the pending
 * counts are shrunk to match and 0 is still returned.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        u32 i, rx_pkt_dma_sz;

        tpr->rx_std_cons_idx = 0;
        tpr->rx_std_prod_idx = 0;
        tpr->rx_jmb_cons_idx = 0;
        tpr->rx_jmb_prod_idx = 0;

        /* Secondary (per-vector) ring sets only track buffers; the hw
         * descriptors live in the primary set.  Just clear the arrays.
         */
        if (tpr != &tp->napi[0].prodring) {
                memset(&tpr->rx_std_buffers[0], 0,
                       TG3_RX_STD_BUFF_RING_SIZE(tp));
                if (tpr->rx_jmb_buffers)
                        memset(&tpr->rx_jmb_buffers[0], 0,
                               TG3_RX_JMB_BUFF_RING_SIZE(tp));
                goto done;
        }

        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

        /* 5780-class parts with jumbo MTU use jumbo-sized DMA buffers
         * in the standard ring (they have no separate jumbo ring).
         */
        rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
        if (tg3_flag(tp, 5780_CLASS) &&
            tp->dev->mtu > ETH_DATA_LEN)
                rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
        tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i <= tp->rx_std_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_std[i];
                rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                unsigned int frag_size;

                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
                                      &frag_size) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX standard ring. Only "
                                    "%d out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_pending);
                        if (i == 0)
                                goto initfail;
                        /* Shrink the ring to what we managed to fill. */
                        tp->rx_pending = i;
                        break;
                }
        }

        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                goto done;

        memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

        if (!tg3_flag(tp, JUMBO_RING_ENABLE))
                goto done;

        /* Same invariant setup as above, but for the jumbo ring. */
        for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_jmb[i].std;
                rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                  RXD_FLAG_JUMBO;
                rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                       (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        for (i = 0; i < tp->rx_jumbo_pending; i++) {
                unsigned int frag_size;

                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
                                      &frag_size) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX jumbo ring. Only %d "
                                    "out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_jumbo_pending);
                        if (i == 0)
                                goto initfail;
                        tp->rx_jumbo_pending = i;
                        break;
                }
        }

done:
        return 0;

initfail:
        tg3_rx_prodring_free(tp, tpr);
        return -ENOMEM;
}
8472
8473 static void tg3_rx_prodring_fini(struct tg3 *tp,
8474                                  struct tg3_rx_prodring_set *tpr)
8475 {
8476         kfree(tpr->rx_std_buffers);
8477         tpr->rx_std_buffers = NULL;
8478         kfree(tpr->rx_jmb_buffers);
8479         tpr->rx_jmb_buffers = NULL;
8480         if (tpr->rx_std) {
8481                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8482                                   tpr->rx_std, tpr->rx_std_mapping);
8483                 tpr->rx_std = NULL;
8484         }
8485         if (tpr->rx_jmb) {
8486                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8487                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8488                 tpr->rx_jmb = NULL;
8489         }
8490 }
8491
8492 static int tg3_rx_prodring_init(struct tg3 *tp,
8493                                 struct tg3_rx_prodring_set *tpr)
8494 {
8495         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8496                                       GFP_KERNEL);
8497         if (!tpr->rx_std_buffers)
8498                 return -ENOMEM;
8499
8500         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8501                                          TG3_RX_STD_RING_BYTES(tp),
8502                                          &tpr->rx_std_mapping,
8503                                          GFP_KERNEL);
8504         if (!tpr->rx_std)
8505                 goto err_out;
8506
8507         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8508                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8509                                               GFP_KERNEL);
8510                 if (!tpr->rx_jmb_buffers)
8511                         goto err_out;
8512
8513                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8514                                                  TG3_RX_JMB_RING_BYTES(tp),
8515                                                  &tpr->rx_jmb_mapping,
8516                                                  GFP_KERNEL);
8517                 if (!tpr->rx_jmb)
8518                         goto err_out;
8519         }
8520
8521         return 0;
8522
8523 err_out:
8524         tg3_rx_prodring_fini(tp, tpr);
8525         return -ENOMEM;
8526 }
8527
8528 /* Free up pending packets in all rx/tx rings.
8529  *
8530  * The chip has been shut down and the driver detached from
8531  * the networking, so no interrupts or new tx packets will
8532  * end up in the driver.  tp->{tx,}lock is not held and we are not
8533  * in an interrupt context and thus may sleep.
8534  */
8535 static void tg3_free_rings(struct tg3 *tp)
8536 {
8537         int i, j;
8538
8539         for (j = 0; j < tp->irq_cnt; j++) {
8540                 struct tg3_napi *tnapi = &tp->napi[j];
8541
8542                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8543
8544                 if (!tnapi->tx_buffers)
8545                         continue;
8546
8547                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8548                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8549
8550                         if (!skb)
8551                                 continue;
8552
8553                         tg3_tx_skb_unmap(tnapi, i,
8554                                          skb_shinfo(skb)->nr_frags - 1);
8555
8556                         dev_consume_skb_any(skb);
8557                 }
8558                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8559         }
8560 }
8561
8562 /* Initialize tx/rx rings for packet processing.
8563  *
8564  * The chip has been shut down and the driver detached from
8565  * the networking, so no interrupts or new tx packets will
8566  * end up in the driver.  tp->{tx,}lock are held and thus
8567  * we may not sleep.
8568  */
8569 static int tg3_init_rings(struct tg3 *tp)
8570 {
8571         int i;
8572
8573         /* Free up all the SKBs. */
8574         tg3_free_rings(tp);
8575
8576         for (i = 0; i < tp->irq_cnt; i++) {
8577                 struct tg3_napi *tnapi = &tp->napi[i];
8578
8579                 tnapi->last_tag = 0;
8580                 tnapi->last_irq_tag = 0;
8581                 tnapi->hw_status->status = 0;
8582                 tnapi->hw_status->status_tag = 0;
8583                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8584
8585                 tnapi->tx_prod = 0;
8586                 tnapi->tx_cons = 0;
8587                 if (tnapi->tx_ring)
8588                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8589
8590                 tnapi->rx_rcb_ptr = 0;
8591                 if (tnapi->rx_rcb)
8592                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8593
8594                 if (tnapi->prodring.rx_std &&
8595                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8596                         tg3_free_rings(tp);
8597                         return -ENOMEM;
8598                 }
8599         }
8600
8601         return 0;
8602 }
8603
8604 static void tg3_mem_tx_release(struct tg3 *tp)
8605 {
8606         int i;
8607
8608         for (i = 0; i < tp->irq_max; i++) {
8609                 struct tg3_napi *tnapi = &tp->napi[i];
8610
8611                 if (tnapi->tx_ring) {
8612                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8613                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8614                         tnapi->tx_ring = NULL;
8615                 }
8616
8617                 kfree(tnapi->tx_buffers);
8618                 tnapi->tx_buffers = NULL;
8619         }
8620 }
8621
8622 static int tg3_mem_tx_acquire(struct tg3 *tp)
8623 {
8624         int i;
8625         struct tg3_napi *tnapi = &tp->napi[0];
8626
8627         /* If multivector TSS is enabled, vector 0 does not handle
8628          * tx interrupts.  Don't allocate any resources for it.
8629          */
8630         if (tg3_flag(tp, ENABLE_TSS))
8631                 tnapi++;
8632
8633         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8634                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8635                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8636                 if (!tnapi->tx_buffers)
8637                         goto err_out;
8638
8639                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8640                                                     TG3_TX_RING_BYTES,
8641                                                     &tnapi->tx_desc_mapping,
8642                                                     GFP_KERNEL);
8643                 if (!tnapi->tx_ring)
8644                         goto err_out;
8645         }
8646
8647         return 0;
8648
8649 err_out:
8650         tg3_mem_tx_release(tp);
8651         return -ENOMEM;
8652 }
8653
8654 static void tg3_mem_rx_release(struct tg3 *tp)
8655 {
8656         int i;
8657
8658         for (i = 0; i < tp->irq_max; i++) {
8659                 struct tg3_napi *tnapi = &tp->napi[i];
8660
8661                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8662
8663                 if (!tnapi->rx_rcb)
8664                         continue;
8665
8666                 dma_free_coherent(&tp->pdev->dev,
8667                                   TG3_RX_RCB_RING_BYTES(tp),
8668                                   tnapi->rx_rcb,
8669                                   tnapi->rx_rcb_mapping);
8670                 tnapi->rx_rcb = NULL;
8671         }
8672 }
8673
8674 static int tg3_mem_rx_acquire(struct tg3 *tp)
8675 {
8676         unsigned int i, limit;
8677
8678         limit = tp->rxq_cnt;
8679
8680         /* If RSS is enabled, we need a (dummy) producer ring
8681          * set on vector zero.  This is the true hw prodring.
8682          */
8683         if (tg3_flag(tp, ENABLE_RSS))
8684                 limit++;
8685
8686         for (i = 0; i < limit; i++) {
8687                 struct tg3_napi *tnapi = &tp->napi[i];
8688
8689                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8690                         goto err_out;
8691
8692                 /* If multivector RSS is enabled, vector 0
8693                  * does not handle rx or tx interrupts.
8694                  * Don't allocate any resources for it.
8695                  */
8696                 if (!i && tg3_flag(tp, ENABLE_RSS))
8697                         continue;
8698
8699                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8700                                                     TG3_RX_RCB_RING_BYTES(tp),
8701                                                     &tnapi->rx_rcb_mapping,
8702                                                     GFP_KERNEL);
8703                 if (!tnapi->rx_rcb)
8704                         goto err_out;
8705         }
8706
8707         return 0;
8708
8709 err_out:
8710         tg3_mem_rx_release(tp);
8711         return -ENOMEM;
8712 }
8713
8714 /*
8715  * Must not be invoked with interrupt sources disabled and
8716  * the hardware shutdown down.
8717  */
8718 static void tg3_free_consistent(struct tg3 *tp)
8719 {
8720         int i;
8721
8722         for (i = 0; i < tp->irq_cnt; i++) {
8723                 struct tg3_napi *tnapi = &tp->napi[i];
8724
8725                 if (tnapi->hw_status) {
8726                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8727                                           tnapi->hw_status,
8728                                           tnapi->status_mapping);
8729                         tnapi->hw_status = NULL;
8730                 }
8731         }
8732
8733         tg3_mem_rx_release(tp);
8734         tg3_mem_tx_release(tp);
8735
8736         /* tp->hw_stats can be referenced safely:
8737          *     1. under rtnl_lock
8738          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8739          */
8740         if (tp->hw_stats) {
8741                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8742                                   tp->hw_stats, tp->stats_mapping);
8743                 tp->hw_stats = NULL;
8744         }
8745 }
8746
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the hw stats block, a status block per interrupt vector,
 * and the tx/rx ring memory.  Also records, per vector, where in the
 * status block that vector's rx return ring producer index lives.
 * Returns 0 or -ENOMEM (everything is freed again on failure).
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        int i;

        tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
                                           sizeof(struct tg3_hw_stats),
                                           &tp->stats_mapping, GFP_KERNEL);
        if (!tp->hw_stats)
                goto err_out;

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                struct tg3_hw_status *sblk;

                tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
                                                       TG3_HW_STATUS_SIZE,
                                                       &tnapi->status_mapping,
                                                       GFP_KERNEL);
                if (!tnapi->hw_status)
                        goto err_out;

                sblk = tnapi->hw_status;

                if (tg3_flag(tp, ENABLE_RSS)) {
                        u16 *prodptr = NULL;

                        /*
                         * When RSS is enabled, the status block format changes
                         * slightly.  The "rx_jumbo_consumer", "reserved",
                         * and "rx_mini_consumer" members get mapped to the
                         * other three rx return ring producer indexes.
                         */
                        switch (i) {
                        case 1:
                                prodptr = &sblk->idx[0].rx_producer;
                                break;
                        case 2:
                                prodptr = &sblk->rx_jumbo_consumer;
                                break;
                        case 3:
                                prodptr = &sblk->reserved;
                                break;
                        case 4:
                                prodptr = &sblk->rx_mini_consumer;
                                break;
                        }
                        /* NULL for vector 0 (no rx return ring with RSS)
                         * and any vector beyond 4.
                         */
                        tnapi->rx_rcb_prod_idx = prodptr;
                } else {
                        tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
                }
        }

        if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
                goto err_out;

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
8812
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * @ofs:        register offset of the block's mode register
 * @enable_bit: the enable bit to clear in that register
 * @silent:     suppress the error message on timeout
 *
 * Returns 0 on success, -ENODEV if the device disappeared from the
 * bus or the enable bit did not clear within MAX_WAIT_CNT * 100us.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
        unsigned int i;
        u32 val;

        /* On 5705+ parts several engines are hardwired on; their enable
         * bits cannot be toggled, so report success without touching them.
         */
        if (tg3_flag(tp, 5705_PLUS)) {
                switch (ofs) {
                case RCVLSC_MODE:
                case DMAC_MODE:
                case MBFREE_MODE:
                case BUFMGR_MODE:
                case MEMARB_MODE:
                        /* We can't enable/disable these bits of the
                         * 5705/5750, just say success.
                         */
                        return 0;

                default:
                        break;
                }
        }

        /* Clear the enable bit and flush the posted write (tw32_f). */
        val = tr32(ofs);
        val &= ~enable_bit;
        tw32_f(ofs, val);

        /* Poll for the hardware to acknowledge the stop. */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                if (pci_channel_offline(tp->pdev)) {
                        dev_err(&tp->pdev->dev,
                                "tg3_stop_block device offline, "
                                "ofs=%lx enable_bit=%x\n",
                                ofs, enable_bit);
                        return -ENODEV;
                }

                udelay(100);
                val = tr32(ofs);
                if ((val & enable_bit) == 0)
                        break;
        }

        /* Timed out; only complain when the caller wants noise. */
        if (i == MAX_WAIT_CNT && !silent) {
                dev_err(&tp->pdev->dev,
                        "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
                        ofs, enable_bit);
                return -ENODEV;
        }

        return 0;
}
8868
/* tp->lock is held. */
/* Quiesce the chip: disable interrupts, then stop every DMA and state
 * machine block in dependency order (receive path first, then the send
 * path, MAC transmit, host coalescing, and finally the buffer/memory
 * managers), and clear all host status blocks.  Individual stop
 * failures are OR-ed into err so the caller sees any timeout.
 */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* If the device fell off the bus, skip all register access and
         * just bring the software shadow state in line.
         */
        if (pci_channel_offline(tp->pdev)) {
                tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
                tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
                err = -ENODEV;
                goto err_no_dev;
        }

        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Stop the receive-side blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Stop the send-side blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        /* MAC_TX_MODE has no stop_block helper; poll it directly. */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                dev_err(&tp->pdev->dev,
                        "%s timed out, TX_MODE_ENABLE will not clear "
                        "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Reset all FTQs (pulse the reset register). */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
        /* Clear every host status block so stale events are not seen
         * after the hardware is restarted.
         */
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }

        return err;
}
8940
/* Save PCI command register before chip reset */
/* Only PCI_COMMAND is saved here; the rest of the PCI state clobbered
 * by the core-clock reset is reconstructed in tg3_restore_pci_state().
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8946
/* Restore PCI state after chip reset */
/* Re-establishes the PCI config-space settings that the GRC core-clock
 * reset wipes out: indirect access enable, PCISTATE/APE access bits,
 * PCI_COMMAND, cacheline/latency (conventional PCI only), PCI-X
 * relaxed ordering, and the MSI enable bit on 5780-class chips.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tg3_flag(tp, ENABLE_APE))
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR |
                       PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Restore the command register saved in tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        /* Cacheline size / latency timer only matter on conventional PCI. */
        if (!tg3_flag(tp, PCI_EXPRESS)) {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tg3_flag(tp, 5780_CLASS)) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tg3_flag(tp, USING_MSI)) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
9007
9008 static void tg3_override_clk(struct tg3 *tp)
9009 {
9010         u32 val;
9011
9012         switch (tg3_asic_rev(tp)) {
9013         case ASIC_REV_5717:
9014                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9015                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9016                      TG3_CPMU_MAC_ORIDE_ENABLE);
9017                 break;
9018
9019         case ASIC_REV_5719:
9020         case ASIC_REV_5720:
9021                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9022                 break;
9023
9024         default:
9025                 return;
9026         }
9027 }
9028
9029 static void tg3_restore_clk(struct tg3 *tp)
9030 {
9031         u32 val;
9032
9033         switch (tg3_asic_rev(tp)) {
9034         case ASIC_REV_5717:
9035                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9036                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9037                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9038                 break;
9039
9040         case ASIC_REV_5719:
9041         case ASIC_REV_5720:
9042                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9043                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9044                 break;
9045
9046         default:
9047                 return;
9048         }
9049 }
9050
/* tp->lock is held. */
/* Perform a full GRC core-clock reset of the chip and bring it back to
 * a usable post-reset state: save PCI config space, quiesce the irq
 * handlers around the reset, apply numerous chip-revision workarounds,
 * wait for bootcode completion, restore PCI state, and re-probe the
 * ASF configuration from NVRAM shared memory.
 *
 * Temporarily drops tp->lock around synchronize_irq() (see the
 * __releases/__acquires annotations).  Returns 0 or a negative errno.
 */
static int tg3_chip_reset(struct tg3 *tp)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int i, err;

        if (!pci_device_is_present(tp->pdev))
                return -ENODEV;

        tg3_nvram_lock(tp);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
            tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tg3_flag_set(tp, CHIP_RESETTING);
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status) {
                        tnapi->hw_status->status = 0;
                        tnapi->hw_status->status_tag = 0;
                }
                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
        }
        smp_mb();

        /* Drop the lock while waiting for any in-flight irq handlers. */
        tg3_full_unlock(tp);

        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);

        tg3_full_lock(tp, 0);

        if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
                if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        /* Set the clock to the highest frequency to avoid timeouts. With link
         * aware mode, the clock speed could be slow and bootcode does not
         * complete within the expected time. Override the clock to allow the
         * bootcode to finish sooner and then restore it.
         */
        tg3_override_clk(tp);

        /* Manage gphy power for all CPMU absent PCIe devices. */
        if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

        /* This write actually triggers the core-clock reset. */
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
                u16 val16;

                if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
                        int j;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (j = 0; j < 5000; j++)
                                udelay(100);

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }

                /* Clear the "no snoop" and "relaxed ordering" bits. */
                val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
                /*
                 * Older PCIe devices only support the 128 byte
                 * MPS setting.  Enforce the restriction.
                 */
                if (!tg3_flag(tp, CPMU_PRESENT))
                        val16 |= PCI_EXP_DEVCTL_PAYLOAD;
                pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

                /* Clear error status */
                pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
                                      PCI_EXP_DEVSTA_CED |
                                      PCI_EXP_DEVSTA_NFED |
                                      PCI_EXP_DEVSTA_FED |
                                      PCI_EXP_DEVSTA_URD);
        }

        tg3_restore_pci_state(tp);

        tg3_flag_clear(tp, CHIP_RESETTING);
        tg3_flag_clear(tp, ERROR_PROCESSED);

        /* Re-enable the memory arbiter, preserving mode bits on 5780. */
        val = 0;
        if (tg3_flag(tp, 5780_CLASS))
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        if (tg3_flag(tp, IS_SSB_CORE)) {
                /*
                 * BCM4785: In order to avoid repercussions from using
                 * potentially defective internal ROM, stop the Rx RISC CPU,
                 * which is not required.
                 */
                tg3_stop_fw(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
        }

        /* Wait for the on-chip bootcode to finish its post-reset work. */
        err = tg3_poll_fw(tp);
        if (err)
                return err;

        tw32(GRC_MODE, tp->grc_mode);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            tg3_asic_rev(tp) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Re-establish the MAC port mode for serdes-based boards. */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                val = tp->mac_mode;
        } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                val = tp->mac_mode;
        } else
                val = 0;

        tw32_f(MAC_MODE, val);
        udelay(40);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

        tg3_mdio_start(tp);

        if (tg3_flag(tp, PCI_EXPRESS) &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
            tg3_asic_rev(tp) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        tg3_restore_clk(tp);

        /* Reprobe ASF enable state.  */
        tg3_flag_clear(tp, ENABLE_ASF);
        tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
                           TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

        tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        tp->last_event_jiffies = jiffies;
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
                        if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
                                tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
                        if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
                                tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
                }
        }

        return 0;
}
9319
9320 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9321 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9322 static void __tg3_set_rx_mode(struct net_device *);
9323
/* tp->lock is held. */
/* Bring the chip fully down: stop firmware handshakes, signal the
 * reset reason to bootcode/ASF, quiesce the hardware, reset the chip,
 * then restore the MAC address and preserve the running statistics
 * across the reset.  Returns the result of tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
        int err;

        tg3_stop_fw(tp);

        /* Tell the firmware why we are about to reset. */
        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        __tg3_set_mac_addr(tp, false);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        if (tp->hw_stats) {
                /* Save the stats across chip resets... */
                tg3_get_nstats(tp, &tp->net_stats_prev);
                tg3_get_estats(tp, &tp->estats_prev);

                /* And make sure the next sample is new data */
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
        }

        return err;
}
9352
9353 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9354 {
9355         struct tg3 *tp = netdev_priv(dev);
9356         struct sockaddr *addr = p;
9357         int err = 0;
9358         bool skip_mac_1 = false;
9359
9360         if (!is_valid_ether_addr(addr->sa_data))
9361                 return -EADDRNOTAVAIL;
9362
9363         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9364
9365         if (!netif_running(dev))
9366                 return 0;
9367
9368         if (tg3_flag(tp, ENABLE_ASF)) {
9369                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9370
9371                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9372                 addr0_low = tr32(MAC_ADDR_0_LOW);
9373                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9374                 addr1_low = tr32(MAC_ADDR_1_LOW);
9375
9376                 /* Skip MAC addr 1 if ASF is using it. */
9377                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9378                     !(addr1_high == 0 && addr1_low == 0))
9379                         skip_mac_1 = true;
9380         }
9381         spin_lock_bh(&tp->lock);
9382         __tg3_set_mac_addr(tp, skip_mac_1);
9383         __tg3_set_rx_mode(dev);
9384         spin_unlock_bh(&tp->lock);
9385
9386         return err;
9387 }
9388
9389 /* tp->lock is held. */
9390 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9391                            dma_addr_t mapping, u32 maxlen_flags,
9392                            u32 nic_addr)
9393 {
9394         tg3_write_mem(tp,
9395                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9396                       ((u64) mapping >> 32));
9397         tg3_write_mem(tp,
9398                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9399                       ((u64) mapping & 0xffffffff));
9400         tg3_write_mem(tp,
9401                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9402                        maxlen_flags);
9403
9404         if (!tg3_flag(tp, 5705_PLUS))
9405                 tg3_write_mem(tp,
9406                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9407                               nic_addr);
9408 }
9409
9410
/* Program the transmit host-coalescing parameters.
 *
 * Without TSS only the vector-0 registers are used.  With TSS the
 * vector-0 registers are zeroed and each tx queue gets its own
 * per-vector register block (VEC1 base, 0x18 bytes apart).  Note that
 * 'i' deliberately carries over from the TSS loop into the final loop,
 * so only the vectors left unprogrammed get zeroed.
 */
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
        int i = 0;

        if (!tg3_flag(tp, ENABLE_TSS)) {
                tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
                tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
                tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
        } else {
                tw32(HOSTCC_TXCOL_TICKS, 0);
                tw32(HOSTCC_TXMAX_FRAMES, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

                /* One register block per active tx queue. */
                for (; i < tp->txq_cnt; i++) {
                        u32 reg;

                        reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_coalesce_usecs);
                        reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames);
                        reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames_irq);
                }
        }

        /* Zero the remaining, unused per-vector registers. */
        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
        }
}
9442
/* Program the receive host-coalescing parameters.
 *
 * Without RSS, vector 0 is programmed directly and counts against
 * 'limit' (hence the limit--).  With RSS, vector 0 is zeroed and the
 * rx queues use the per-vector register blocks (VEC1 base, 0x18 bytes
 * apart).  As in tg3_coal_tx_init(), 'i' carries over into the final
 * loop so only unused vectors are zeroed.
 */
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
        int i = 0;
        u32 limit = tp->rxq_cnt;

        if (!tg3_flag(tp, ENABLE_RSS)) {
                tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
                tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
                tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
                /* Vector 0 already covered; one fewer per-vector block. */
                limit--;
        } else {
                tw32(HOSTCC_RXCOL_TICKS, 0);
                tw32(HOSTCC_RXMAX_FRAMES, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
        }

        /* One register block per remaining rx queue. */
        for (; i < limit; i++) {
                u32 reg;

                reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
                tw32(reg, ec->rx_coalesce_usecs);
                reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames);
                reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames_irq);
        }

        /* Zero the remaining, unused per-vector registers. */
        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
        }
}
9476
9477 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9478 {
9479         tg3_coal_tx_init(tp, ec);
9480         tg3_coal_rx_init(tp, ec);
9481
9482         if (!tg3_flag(tp, 5705_PLUS)) {
9483                 u32 val = ec->stats_block_coalesce_usecs;
9484
9485                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9486                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9487
9488                 if (!tp->link_up)
9489                         val = 0;
9490
9491                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9492         }
9493 }
9494
9495 /* tp->lock is held. */
9496 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9497 {
9498         u32 txrcb, limit;
9499
9500         /* Disable all transmit rings but the first. */
9501         if (!tg3_flag(tp, 5705_PLUS))
9502                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9503         else if (tg3_flag(tp, 5717_PLUS))
9504                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9505         else if (tg3_flag(tp, 57765_CLASS) ||
9506                  tg3_asic_rev(tp) == ASIC_REV_5762)
9507                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9508         else
9509                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9510
9511         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9512              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9513                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9514                               BDINFO_FLAGS_DISABLED);
9515 }
9516
9517 /* tp->lock is held. */
9518 static void tg3_tx_rcbs_init(struct tg3 *tp)
9519 {
9520         int i = 0;
9521         u32 txrcb = NIC_SRAM_SEND_RCB;
9522
9523         if (tg3_flag(tp, ENABLE_TSS))
9524                 i++;
9525
9526         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9527                 struct tg3_napi *tnapi = &tp->napi[i];
9528
9529                 if (!tnapi->tx_ring)
9530                         continue;
9531
9532                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9533                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9534                                NIC_SRAM_TX_BUFFER_DESC);
9535         }
9536 }
9537
/* tp->lock is held. */
/* Disable every NIC-SRAM receive return ring control block except
 * ring 0.  The number of return RCBs present depends on the chip
 * generation.
 */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
        u32 rxrcb, limit;

        /* Disable all receive return rings but the first. */
        if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
                 tg3_asic_rev(tp) == ASIC_REV_5762 ||
                 tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

        /* Mark every RCB after the first one disabled. */
        for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
             rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);
}
9560
9561 /* tp->lock is held. */
9562 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9563 {
9564         int i = 0;
9565         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9566
9567         if (tg3_flag(tp, ENABLE_RSS))
9568                 i++;
9569
9570         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9571                 struct tg3_napi *tnapi = &tp->napi[i];
9572
9573                 if (!tnapi->rx_rcb)
9574                         continue;
9575
9576                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9577                                (tp->rx_ret_ring_mask + 1) <<
9578                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9579         }
9580 }
9581
/* tp->lock is held. */
/* Reset all ring state: disable the secondary send and receive return
 * RCBs, zero every mailbox register and host status block, program the
 * status-block DMA addresses for each vector, and re-initialize the
 * active RCBs.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
        int i;
        u32 stblk;
        struct tg3_napi *tnapi = &tp->napi[0];

        tg3_tx_rcbs_disable(tp);

        tg3_rx_ret_rcbs_disable(tp);

        /* Disable interrupts */
        tw32_mailbox_f(tp->napi[0].int_mbox, 1);
        tp->napi[0].chk_msi_cnt = 0;
        tp->napi[0].last_rx_cons = 0;
        tp->napi[0].last_tx_cons = 0;

        /* Zero mailbox registers. */
        if (tg3_flag(tp, SUPPORT_MSIX)) {
                for (i = 1; i < tp->irq_max; i++) {
                        tp->napi[i].tx_prod = 0;
                        tp->napi[i].tx_cons = 0;
                        if (tg3_flag(tp, ENABLE_TSS))
                                tw32_mailbox(tp->napi[i].prodmbox, 0);
                        tw32_rx_mbox(tp->napi[i].consmbox, 0);
                        tw32_mailbox_f(tp->napi[i].int_mbox, 1);
                        tp->napi[i].chk_msi_cnt = 0;
                        tp->napi[i].last_rx_cons = 0;
                        tp->napi[i].last_tx_cons = 0;
                }
                /* Without TSS, tx production happens on vector 0. */
                if (!tg3_flag(tp, ENABLE_TSS))
                        tw32_mailbox(tp->napi[0].prodmbox, 0);
        } else {
                tp->napi[0].tx_prod = 0;
                tp->napi[0].tx_cons = 0;
                tw32_mailbox(tp->napi[0].prodmbox, 0);
                tw32_rx_mbox(tp->napi[0].consmbox, 0);
        }

        /* Make sure the NIC-based send BD rings are disabled. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
                for (i = 0; i < 16; i++)
                        tw32_tx_mbox(mbox + i * 8, 0);
        }

        /* Clear status block in ram. */
        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

        /* Set status block DMA address */
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
             ((u64) tnapi->status_mapping >> 32));
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tnapi->status_mapping & 0xffffffff));

        /* Per-vector status block addresses start at STATBLCK_RING1 and
         * are 8 bytes apart (high/low dword pairs).
         */
        stblk = HOSTCC_STATBLCK_RING1;

        for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
                u64 mapping = (u64)tnapi->status_mapping;
                tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
                tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
                stblk += 8;

                /* Clear status block in ram. */
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }

        tg3_tx_rcbs_init(tp);
        tg3_rx_ret_rcbs_init(tp);
}
9652
/* Program the standard (and, when present, jumbo) RX buffer descriptor
 * replenish thresholds, sized from the per-chip on-board BD cache.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Select the standard-ring BD cache size for this chip family. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	/* NIC-side limit: half the BD cache, capped by rx_std_max_post.
	 * Host-side limit: an eighth of the configured ring, at least 1.
	 */
	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	/* The programmed threshold is the stricter of the two. */
	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* Chips without a usable jumbo ring need no further setup. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
9691
9692 static inline u32 calc_crc(unsigned char *buf, int len)
9693 {
9694         u32 reg;
9695         u32 tmp;
9696         int j, k;
9697
9698         reg = 0xffffffff;
9699
9700         for (j = 0; j < len; j++) {
9701                 reg ^= buf[j];
9702
9703                 for (k = 0; k < 8; k++) {
9704                         tmp = reg & 0x01;
9705
9706                         reg >>= 1;
9707
9708                         if (tmp)
9709                                 reg ^= 0xedb88320;
9710                 }
9711         }
9712
9713         return ~reg;
9714 }
9715
9716 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9717 {
9718         /* accept or reject all multicast frames */
9719         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9720         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9721         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9722         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9723 }
9724
/* Recompute and program the MAC receive mode: promiscuity, VLAN tag
 * stripping, the multicast hash filter and the unicast address slots.
 * Only touches MAC_RX_MODE when the computed mode actually changed.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address: the low 7 bits of the inverted CRC
		 * select one of 128 filter bits, split as 2 bits of
		 * register index and 5 bits of bit position.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Fall back to promiscuous mode when there are more secondary
	 * unicast addresses than hardware filter slots.
	 */
	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9792
9793 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9794 {
9795         int i;
9796
9797         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9798                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9799 }
9800
9801 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9802 {
9803         int i;
9804
9805         if (!tg3_flag(tp, SUPPORT_MSIX))
9806                 return;
9807
9808         if (tp->rxq_cnt == 1) {
9809                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9810                 return;
9811         }
9812
9813         /* Validate table against current IRQ count */
9814         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9815                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9816                         break;
9817         }
9818
9819         if (i != TG3_RSS_INDIR_TBL_SIZE)
9820                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9821 }
9822
9823 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9824 {
9825         int i = 0;
9826         u32 reg = MAC_RSS_INDIR_TBL_0;
9827
9828         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9829                 u32 val = tp->rss_ind_tbl[i];
9830                 i++;
9831                 for (; i % 8; i++) {
9832                         val <<= 4;
9833                         val |= tp->rss_ind_tbl[i];
9834                 }
9835                 tw32(reg, val);
9836                 reg += 4;
9837         }
9838 }
9839
9840 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9841 {
9842         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9843                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9844         else
9845                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9846 }
9847
9848 /* tp->lock is held. */
9849 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9850 {
9851         u32 val, rdmac_mode;
9852         int i, err, limit;
9853         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9854
9855         tg3_disable_ints(tp);
9856
9857         tg3_stop_fw(tp);
9858
9859         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9860
9861         if (tg3_flag(tp, INIT_COMPLETE))
9862                 tg3_abort_hw(tp, 1);
9863
9864         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9865             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9866                 tg3_phy_pull_config(tp);
9867                 tg3_eee_pull_config(tp, NULL);
9868                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9869         }
9870
9871         /* Enable MAC control of LPI */
9872         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9873                 tg3_setup_eee(tp);
9874
9875         if (reset_phy)
9876                 tg3_phy_reset(tp);
9877
9878         err = tg3_chip_reset(tp);
9879         if (err)
9880                 return err;
9881
9882         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9883
9884         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9885                 val = tr32(TG3_CPMU_CTRL);
9886                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9887                 tw32(TG3_CPMU_CTRL, val);
9888
9889                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9890                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9891                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9892                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9893
9894                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9895                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9896                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9897                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9898
9899                 val = tr32(TG3_CPMU_HST_ACC);
9900                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9901                 val |= CPMU_HST_ACC_MACCLK_6_25;
9902                 tw32(TG3_CPMU_HST_ACC, val);
9903         }
9904
9905         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9906                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9907                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9908                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9909                 tw32(PCIE_PWR_MGMT_THRESH, val);
9910
9911                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9912                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9913
9914                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9915
9916                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9917                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9918         }
9919
9920         if (tg3_flag(tp, L1PLLPD_EN)) {
9921                 u32 grc_mode = tr32(GRC_MODE);
9922
9923                 /* Access the lower 1K of PL PCIE block registers. */
9924                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9925                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9926
9927                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9928                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9929                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9930
9931                 tw32(GRC_MODE, grc_mode);
9932         }
9933
9934         if (tg3_flag(tp, 57765_CLASS)) {
9935                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9936                         u32 grc_mode = tr32(GRC_MODE);
9937
9938                         /* Access the lower 1K of PL PCIE block registers. */
9939                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9940                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9941
9942                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9943                                    TG3_PCIE_PL_LO_PHYCTL5);
9944                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9945                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9946
9947                         tw32(GRC_MODE, grc_mode);
9948                 }
9949
9950                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9951                         u32 grc_mode;
9952
9953                         /* Fix transmit hangs */
9954                         val = tr32(TG3_CPMU_PADRNG_CTL);
9955                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9956                         tw32(TG3_CPMU_PADRNG_CTL, val);
9957
9958                         grc_mode = tr32(GRC_MODE);
9959
9960                         /* Access the lower 1K of DL PCIE block registers. */
9961                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9962                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9963
9964                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9965                                    TG3_PCIE_DL_LO_FTSMAX);
9966                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9967                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9968                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9969
9970                         tw32(GRC_MODE, grc_mode);
9971                 }
9972
9973                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9974                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9975                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9976                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9977         }
9978
9979         /* This works around an issue with Athlon chipsets on
9980          * B3 tigon3 silicon.  This bit has no effect on any
9981          * other revision.  But do not set this on PCI Express
9982          * chips and don't even touch the clocks if the CPMU is present.
9983          */
9984         if (!tg3_flag(tp, CPMU_PRESENT)) {
9985                 if (!tg3_flag(tp, PCI_EXPRESS))
9986                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9987                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9988         }
9989
9990         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9991             tg3_flag(tp, PCIX_MODE)) {
9992                 val = tr32(TG3PCI_PCISTATE);
9993                 val |= PCISTATE_RETRY_SAME_DMA;
9994                 tw32(TG3PCI_PCISTATE, val);
9995         }
9996
9997         if (tg3_flag(tp, ENABLE_APE)) {
9998                 /* Allow reads and writes to the
9999                  * APE register and memory space.
10000                  */
10001                 val = tr32(TG3PCI_PCISTATE);
10002                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10003                        PCISTATE_ALLOW_APE_SHMEM_WR |
10004                        PCISTATE_ALLOW_APE_PSPACE_WR;
10005                 tw32(TG3PCI_PCISTATE, val);
10006         }
10007
10008         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10009                 /* Enable some hw fixes.  */
10010                 val = tr32(TG3PCI_MSI_DATA);
10011                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10012                 tw32(TG3PCI_MSI_DATA, val);
10013         }
10014
10015         /* Descriptor ring init may make accesses to the
10016          * NIC SRAM area to setup the TX descriptors, so we
10017          * can only do this after the hardware has been
10018          * successfully reset.
10019          */
10020         err = tg3_init_rings(tp);
10021         if (err)
10022                 return err;
10023
10024         if (tg3_flag(tp, 57765_PLUS)) {
10025                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10026                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10027                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10028                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10029                 if (!tg3_flag(tp, 57765_CLASS) &&
10030                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10031                     tg3_asic_rev(tp) != ASIC_REV_5762)
10032                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10033                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10034         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10035                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10036                 /* This value is determined during the probe time DMA
10037                  * engine test, tg3_test_dma.
10038                  */
10039                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10040         }
10041
10042         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10043                           GRC_MODE_4X_NIC_SEND_RINGS |
10044                           GRC_MODE_NO_TX_PHDR_CSUM |
10045                           GRC_MODE_NO_RX_PHDR_CSUM);
10046         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10047
10048         /* Pseudo-header checksum is done by hardware logic and not
10049          * the offload processers, so make the chip do the pseudo-
10050          * header checksums on receive.  For transmit it is more
10051          * convenient to do the pseudo-header checksum in software
10052          * as Linux does that on transmit for us in all cases.
10053          */
10054         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10055
10056         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10057         if (tp->rxptpctl)
10058                 tw32(TG3_RX_PTP_CTL,
10059                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10060
10061         if (tg3_flag(tp, PTP_CAPABLE))
10062                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10063
10064         tw32(GRC_MODE, tp->grc_mode | val);
10065
10066         /* On one of the AMD platform, MRRS is restricted to 4000 because of
10067          * south bridge limitation. As a workaround, Driver is setting MRRS
10068          * to 2048 instead of default 4096.
10069          */
10070         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10071             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10072                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10073                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10074         }
10075
10076         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
10077         val = tr32(GRC_MISC_CFG);
10078         val &= ~0xff;
10079         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10080         tw32(GRC_MISC_CFG, val);
10081
10082         /* Initialize MBUF/DESC pool. */
10083         if (tg3_flag(tp, 5750_PLUS)) {
10084                 /* Do nothing.  */
10085         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10086                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10087                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10088                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10089                 else
10090                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10091                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10092                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10093         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10094                 int fw_len;
10095
10096                 fw_len = tp->fw_len;
10097                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10098                 tw32(BUFMGR_MB_POOL_ADDR,
10099                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10100                 tw32(BUFMGR_MB_POOL_SIZE,
10101                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10102         }
10103
10104         if (tp->dev->mtu <= ETH_DATA_LEN) {
10105                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10106                      tp->bufmgr_config.mbuf_read_dma_low_water);
10107                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10108                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10109                 tw32(BUFMGR_MB_HIGH_WATER,
10110                      tp->bufmgr_config.mbuf_high_water);
10111         } else {
10112                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10113                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10114                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10115                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10116                 tw32(BUFMGR_MB_HIGH_WATER,
10117                      tp->bufmgr_config.mbuf_high_water_jumbo);
10118         }
10119         tw32(BUFMGR_DMA_LOW_WATER,
10120              tp->bufmgr_config.dma_low_water);
10121         tw32(BUFMGR_DMA_HIGH_WATER,
10122              tp->bufmgr_config.dma_high_water);
10123
10124         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10125         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10126                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10127         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10128             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10129             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10130             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10131                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10132         tw32(BUFMGR_MODE, val);
10133         for (i = 0; i < 2000; i++) {
10134                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10135                         break;
10136                 udelay(10);
10137         }
10138         if (i >= 2000) {
10139                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10140                 return -ENODEV;
10141         }
10142
10143         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10144                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10145
10146         tg3_setup_rxbd_thresholds(tp);
10147
10148         /* Initialize TG3_BDINFO's at:
10149          *  RCVDBDI_STD_BD:     standard eth size rx ring
10150          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10151          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10152          *
10153          * like so:
10154          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10155          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10156          *                              ring attribute flags
10157          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10158          *
10159          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10160          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10161          *
10162          * The size of each ring is fixed in the firmware, but the location is
10163          * configurable.
10164          */
10165         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10166              ((u64) tpr->rx_std_mapping >> 32));
10167         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10168              ((u64) tpr->rx_std_mapping & 0xffffffff));
10169         if (!tg3_flag(tp, 5717_PLUS))
10170                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10171                      NIC_SRAM_RX_BUFFER_DESC);
10172
10173         /* Disable the mini ring */
10174         if (!tg3_flag(tp, 5705_PLUS))
10175                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10176                      BDINFO_FLAGS_DISABLED);
10177
10178         /* Program the jumbo buffer descriptor ring control
10179          * blocks on those devices that have them.
10180          */
10181         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10182             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10183
10184                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10185                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10186                              ((u64) tpr->rx_jmb_mapping >> 32));
10187                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10188                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10189                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10190                               BDINFO_FLAGS_MAXLEN_SHIFT;
10191                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10192                              val | BDINFO_FLAGS_USE_EXT_RECV);
10193                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10194                             tg3_flag(tp, 57765_CLASS) ||
10195                             tg3_asic_rev(tp) == ASIC_REV_5762)
10196                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10197                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10198                 } else {
10199                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10200                              BDINFO_FLAGS_DISABLED);
10201                 }
10202
10203                 if (tg3_flag(tp, 57765_PLUS)) {
10204                         val = TG3_RX_STD_RING_SIZE(tp);
10205                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10206                         val |= (TG3_RX_STD_DMA_SZ << 2);
10207                 } else
10208                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10209         } else
10210                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10211
10212         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10213
10214         tpr->rx_std_prod_idx = tp->rx_pending;
10215         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10216
10217         tpr->rx_jmb_prod_idx =
10218                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10219         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10220
10221         tg3_rings_reset(tp);
10222
10223         /* Initialize MAC address and backoff seed. */
10224         __tg3_set_mac_addr(tp, false);
10225
10226         /* MTU + ethernet header + FCS + optional VLAN tag */
10227         tw32(MAC_RX_MTU_SIZE,
10228              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10229
10230         /* The slot time is changed by tg3_setup_phy if we
10231          * run at gigabit with half duplex.
10232          */
10233         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10234               (6 << TX_LENGTHS_IPG_SHIFT) |
10235               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10236
10237         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10238             tg3_asic_rev(tp) == ASIC_REV_5762)
10239                 val |= tr32(MAC_TX_LENGTHS) &
10240                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10241                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10242
10243         tw32(MAC_TX_LENGTHS, val);
10244
10245         /* Receive rules. */
10246         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10247         tw32(RCVLPC_CONFIG, 0x0181);
10248
10249         /* Calculate RDMAC_MODE setting early, we need it to determine
10250          * the RCVLPC_STATE_ENABLE mask.
10251          */
10252         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10253                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10254                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10255                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10256                       RDMAC_MODE_LNGREAD_ENAB);
10257
10258         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10259                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10260
10261         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10262             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10263             tg3_asic_rev(tp) == ASIC_REV_57780)
10264                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10265                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10266                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10267
10268         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10269             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10270                 if (tg3_flag(tp, TSO_CAPABLE) &&
10271                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10272                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10273                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10274                            !tg3_flag(tp, IS_5788)) {
10275                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10276                 }
10277         }
10278
10279         if (tg3_flag(tp, PCI_EXPRESS))
10280                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10281
10282         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10283                 tp->dma_limit = 0;
10284                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10285                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10286                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10287                 }
10288         }
10289
10290         if (tg3_flag(tp, HW_TSO_1) ||
10291             tg3_flag(tp, HW_TSO_2) ||
10292             tg3_flag(tp, HW_TSO_3))
10293                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10294
10295         if (tg3_flag(tp, 57765_PLUS) ||
10296             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10297             tg3_asic_rev(tp) == ASIC_REV_57780)
10298                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10299
10300         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10301             tg3_asic_rev(tp) == ASIC_REV_5762)
10302                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10303
10304         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10305             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10306             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10307             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10308             tg3_flag(tp, 57765_PLUS)) {
10309                 u32 tgtreg;
10310
10311                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10312                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10313                 else
10314                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10315
10316                 val = tr32(tgtreg);
10317                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10318                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10319                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10320                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10321                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10322                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10323                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10324                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10325                 }
10326                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10327         }
10328
10329         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10330             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10331             tg3_asic_rev(tp) == ASIC_REV_5762) {
10332                 u32 tgtreg;
10333
10334                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10335                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10336                 else
10337                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10338
10339                 val = tr32(tgtreg);
10340                 tw32(tgtreg, val |
10341                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10342                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10343         }
10344
10345         /* Receive/send statistics. */
10346         if (tg3_flag(tp, 5750_PLUS)) {
10347                 val = tr32(RCVLPC_STATS_ENABLE);
10348                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10349                 tw32(RCVLPC_STATS_ENABLE, val);
10350         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10351                    tg3_flag(tp, TSO_CAPABLE)) {
10352                 val = tr32(RCVLPC_STATS_ENABLE);
10353                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10354                 tw32(RCVLPC_STATS_ENABLE, val);
10355         } else {
10356                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10357         }
10358         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10359         tw32(SNDDATAI_STATSENAB, 0xffffff);
10360         tw32(SNDDATAI_STATSCTRL,
10361              (SNDDATAI_SCTRL_ENABLE |
10362               SNDDATAI_SCTRL_FASTUPD));
10363
10364         /* Setup host coalescing engine. */
10365         tw32(HOSTCC_MODE, 0);
10366         for (i = 0; i < 2000; i++) {
10367                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10368                         break;
10369                 udelay(10);
10370         }
10371
10372         __tg3_set_coalesce(tp, &tp->coal);
10373
10374         if (!tg3_flag(tp, 5705_PLUS)) {
10375                 /* Status/statistics block address.  See tg3_timer,
10376                  * the tg3_periodic_fetch_stats call there, and
10377                  * tg3_get_stats to see how this works for 5705/5750 chips.
10378                  */
10379                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10380                      ((u64) tp->stats_mapping >> 32));
10381                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10382                      ((u64) tp->stats_mapping & 0xffffffff));
10383                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10384
10385                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10386
10387                 /* Clear statistics and status block memory areas */
10388                 for (i = NIC_SRAM_STATS_BLK;
10389                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10390                      i += sizeof(u32)) {
10391                         tg3_write_mem(tp, i, 0);
10392                         udelay(40);
10393                 }
10394         }
10395
10396         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10397
10398         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10399         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10400         if (!tg3_flag(tp, 5705_PLUS))
10401                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10402
10403         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10404                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10405                 /* reset to prevent losing 1st rx packet intermittently */
10406                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10407                 udelay(10);
10408         }
10409
10410         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10411                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10412                         MAC_MODE_FHDE_ENABLE;
10413         if (tg3_flag(tp, ENABLE_APE))
10414                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10415         if (!tg3_flag(tp, 5705_PLUS) &&
10416             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10417             tg3_asic_rev(tp) != ASIC_REV_5700)
10418                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10419         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10420         udelay(40);
10421
10422         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10423          * If TG3_FLAG_IS_NIC is zero, we should read the
10424          * register to preserve the GPIO settings for LOMs. The GPIOs,
10425          * whether used as inputs or outputs, are set by boot code after
10426          * reset.
10427          */
10428         if (!tg3_flag(tp, IS_NIC)) {
10429                 u32 gpio_mask;
10430
10431                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10432                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10433                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10434
10435                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10436                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10437                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10438
10439                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10440                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10441
10442                 tp->grc_local_ctrl &= ~gpio_mask;
10443                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10444
10445                 /* GPIO1 must be driven high for eeprom write protect */
10446                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10447                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10448                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10449         }
10450         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10451         udelay(100);
10452
10453         if (tg3_flag(tp, USING_MSIX)) {
10454                 val = tr32(MSGINT_MODE);
10455                 val |= MSGINT_MODE_ENABLE;
10456                 if (tp->irq_cnt > 1)
10457                         val |= MSGINT_MODE_MULTIVEC_EN;
10458                 if (!tg3_flag(tp, 1SHOT_MSI))
10459                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10460                 tw32(MSGINT_MODE, val);
10461         }
10462
10463         if (!tg3_flag(tp, 5705_PLUS)) {
10464                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10465                 udelay(40);
10466         }
10467
10468         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10469                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10470                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10471                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10472                WDMAC_MODE_LNGREAD_ENAB);
10473
10474         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10475             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10476                 if (tg3_flag(tp, TSO_CAPABLE) &&
10477                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10478                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10479                         /* nothing */
10480                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10481                            !tg3_flag(tp, IS_5788)) {
10482                         val |= WDMAC_MODE_RX_ACCEL;
10483                 }
10484         }
10485
10486         /* Enable host coalescing bug fix */
10487         if (tg3_flag(tp, 5755_PLUS))
10488                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10489
10490         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10491                 val |= WDMAC_MODE_BURST_ALL_DATA;
10492
10493         tw32_f(WDMAC_MODE, val);
10494         udelay(40);
10495
10496         if (tg3_flag(tp, PCIX_MODE)) {
10497                 u16 pcix_cmd;
10498
10499                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10500                                      &pcix_cmd);
10501                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10502                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10503                         pcix_cmd |= PCI_X_CMD_READ_2K;
10504                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10505                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10506                         pcix_cmd |= PCI_X_CMD_READ_2K;
10507                 }
10508                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10509                                       pcix_cmd);
10510         }
10511
10512         tw32_f(RDMAC_MODE, rdmac_mode);
10513         udelay(40);
10514
10515         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10516             tg3_asic_rev(tp) == ASIC_REV_5720) {
10517                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10518                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10519                                 break;
10520                 }
10521                 if (i < TG3_NUM_RDMA_CHANNELS) {
10522                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10523                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10524                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10525                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10526                 }
10527         }
10528
10529         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10530         if (!tg3_flag(tp, 5705_PLUS))
10531                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10532
10533         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10534                 tw32(SNDDATAC_MODE,
10535                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10536         else
10537                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10538
10539         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10540         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10541         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10542         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10543                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10544         tw32(RCVDBDI_MODE, val);
10545         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10546         if (tg3_flag(tp, HW_TSO_1) ||
10547             tg3_flag(tp, HW_TSO_2) ||
10548             tg3_flag(tp, HW_TSO_3))
10549                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10550         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10551         if (tg3_flag(tp, ENABLE_TSS))
10552                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10553         tw32(SNDBDI_MODE, val);
10554         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10555
10556         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10557                 err = tg3_load_5701_a0_firmware_fix(tp);
10558                 if (err)
10559                         return err;
10560         }
10561
10562         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10563                 /* Ignore any errors for the firmware download. If download
10564                  * fails, the device will operate with EEE disabled
10565                  */
10566                 tg3_load_57766_firmware(tp);
10567         }
10568
10569         if (tg3_flag(tp, TSO_CAPABLE)) {
10570                 err = tg3_load_tso_firmware(tp);
10571                 if (err)
10572                         return err;
10573         }
10574
10575         tp->tx_mode = TX_MODE_ENABLE;
10576
10577         if (tg3_flag(tp, 5755_PLUS) ||
10578             tg3_asic_rev(tp) == ASIC_REV_5906)
10579                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10580
10581         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10582             tg3_asic_rev(tp) == ASIC_REV_5762) {
10583                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10584                 tp->tx_mode &= ~val;
10585                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10586         }
10587
10588         tw32_f(MAC_TX_MODE, tp->tx_mode);
10589         udelay(100);
10590
10591         if (tg3_flag(tp, ENABLE_RSS)) {
10592                 u32 rss_key[10];
10593
10594                 tg3_rss_write_indir_tbl(tp);
10595
10596                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10597
10598                 for (i = 0; i < 10 ; i++)
10599                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10600         }
10601
10602         tp->rx_mode = RX_MODE_ENABLE;
10603         if (tg3_flag(tp, 5755_PLUS))
10604                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10605
10606         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10607                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10608
10609         if (tg3_flag(tp, ENABLE_RSS))
10610                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10611                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10612                                RX_MODE_RSS_IPV6_HASH_EN |
10613                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10614                                RX_MODE_RSS_IPV4_HASH_EN |
10615                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10616
10617         tw32_f(MAC_RX_MODE, tp->rx_mode);
10618         udelay(10);
10619
10620         tw32(MAC_LED_CTRL, tp->led_ctrl);
10621
10622         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10623         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10624                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10625                 udelay(10);
10626         }
10627         tw32_f(MAC_RX_MODE, tp->rx_mode);
10628         udelay(10);
10629
10630         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10631                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10632                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10633                         /* Set drive transmission level to 1.2V  */
10634                         /* only if the signal pre-emphasis bit is not set  */
10635                         val = tr32(MAC_SERDES_CFG);
10636                         val &= 0xfffff000;
10637                         val |= 0x880;
10638                         tw32(MAC_SERDES_CFG, val);
10639                 }
10640                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10641                         tw32(MAC_SERDES_CFG, 0x616000);
10642         }
10643
10644         /* Prevent chip from dropping frames when flow control
10645          * is enabled.
10646          */
10647         if (tg3_flag(tp, 57765_CLASS))
10648                 val = 1;
10649         else
10650                 val = 2;
10651         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10652
10653         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10654             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10655                 /* Use hardware link auto-negotiation */
10656                 tg3_flag_set(tp, HW_AUTONEG);
10657         }
10658
10659         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10660             tg3_asic_rev(tp) == ASIC_REV_5714) {
10661                 u32 tmp;
10662
10663                 tmp = tr32(SERDES_RX_CTRL);
10664                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10665                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10666                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10667                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10668         }
10669
10670         if (!tg3_flag(tp, USE_PHYLIB)) {
10671                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10672                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10673
10674                 err = tg3_setup_phy(tp, false);
10675                 if (err)
10676                         return err;
10677
10678                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10679                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10680                         u32 tmp;
10681
10682                         /* Clear CRC stats. */
10683                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10684                                 tg3_writephy(tp, MII_TG3_TEST1,
10685                                              tmp | MII_TG3_TEST1_CRC_EN);
10686                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10687                         }
10688                 }
10689         }
10690
10691         __tg3_set_rx_mode(tp->dev);
10692
10693         /* Initialize receive rules. */
10694         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10695         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10696         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10697         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10698
10699         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10700                 limit = 8;
10701         else
10702                 limit = 16;
10703         if (tg3_flag(tp, ENABLE_ASF))
10704                 limit -= 4;
10705         switch (limit) {
10706         case 16:
10707                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10708         case 15:
10709                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10710         case 14:
10711                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10712         case 13:
10713                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10714         case 12:
10715                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10716         case 11:
10717                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10718         case 10:
10719                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10720         case 9:
10721                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10722         case 8:
10723                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10724         case 7:
10725                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10726         case 6:
10727                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10728         case 5:
10729                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10730         case 4:
10731                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10732         case 3:
10733                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10734         case 2:
10735         case 1:
10736
10737         default:
10738                 break;
10739         }
10740
10741         if (tg3_flag(tp, ENABLE_APE))
10742                 /* Write our heartbeat update interval to APE. */
10743                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10744                                 APE_HOST_HEARTBEAT_INT_5SEC);
10745
10746         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10747
10748         return 0;
10749 }
10750
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
        /* Chip may have been just powered on. If so, the boot code may still
         * be running initialization. Wait for it to finish to avoid races in
         * accessing the hardware.
         */
        tg3_enable_register_access(tp);
        tg3_poll_fw(tp);

        /* Select the chip clock configuration before programming registers. */
        tg3_switch_clocks(tp);

        /* Reset the memory window base so subsequent indirect SRAM accesses
         * start from a known offset.
         */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        return tg3_reset_hw(tp, reset_phy);
}
10769
10770 #ifdef CONFIG_TIGON3_HWMON
10771 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10772 {
10773         int i;
10774
10775         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10776                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10777
10778                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10779                 off += len;
10780
10781                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10782                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10783                         memset(ocir, 0, TG3_OCIR_LEN);
10784         }
10785 }
10786
10787 /* sysfs attributes for hwmon */
10788 static ssize_t tg3_show_temp(struct device *dev,
10789                              struct device_attribute *devattr, char *buf)
10790 {
10791         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10792         struct tg3 *tp = dev_get_drvdata(dev);
10793         u32 temperature;
10794
10795         spin_lock_bh(&tp->lock);
10796         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10797                                 sizeof(temperature));
10798         spin_unlock_bh(&tp->lock);
10799         return sprintf(buf, "%u\n", temperature * 1000);
10800 }
10801
10802
/* Read-only hwmon attributes.  The final macro argument is the APE
 * scratchpad offset that tg3_show_temp() reads for each sensor value.
 */
static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
                          TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
                          TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
                          TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        NULL /* sentinel */
};
/* Generates the tg3_groups table passed to
 * hwmon_device_register_with_groups() in tg3_hwmon_open().
 */
ATTRIBUTE_GROUPS(tg3);
10817
10818 static void tg3_hwmon_close(struct tg3 *tp)
10819 {
10820         if (tp->hwmon_dev) {
10821                 hwmon_device_unregister(tp->hwmon_dev);
10822                 tp->hwmon_dev = NULL;
10823         }
10824 }
10825
10826 static void tg3_hwmon_open(struct tg3 *tp)
10827 {
10828         int i;
10829         u32 size = 0;
10830         struct pci_dev *pdev = tp->pdev;
10831         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10832
10833         tg3_sd_scan_scratchpad(tp, ocirs);
10834
10835         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10836                 if (!ocirs[i].src_data_length)
10837                         continue;
10838
10839                 size += ocirs[i].src_hdr_length;
10840                 size += ocirs[i].src_data_length;
10841         }
10842
10843         if (!size)
10844                 return;
10845
10846         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10847                                                           tp, tg3_groups);
10848         if (IS_ERR(tp->hwmon_dev)) {
10849                 tp->hwmon_dev = NULL;
10850                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10851         }
10852 }
#else
/* hwmon support compiled out: empty stubs keep the call sites
 * unconditional.
 */
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */
10857
10858
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * statistic PSTAT (a high/low word pair), propagating a carry into
 * the high word when the low word wraps.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
10865
/* Fold the chip's 32-bit MAC and receive-list statistics counters into
 * the 64-bit mirrors in tp->hw_stats.  Called once per second from
 * tg3_timer() (which holds tp->lock) on 5705_PLUS chips.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        /* Skip while the link is down. */
        if (!tp->link_up)
                return;

        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
        /* Once more frames than RDMA channels have been transmitted, back
         * out the LSO read-DMA workaround bit that tg3_reset_hw() set
         * along with the 5719_5720_RDMA_BUG flag.
         */
        if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
                     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
                      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
                u32 val;

                val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
                val &= ~tg3_lso_rd_dma_workaround_bit(tp);
                tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
                tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
        }

        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
            tg3_asic_rev(tp) != ASIC_REV_5762 &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
                TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        } else {
                /* On these chips the discard count is derived from the mbuf
                 * low-watermark attention instead of RCVLPC_IN_DISCARDS_CNT
                 * (NOTE(review): presumably the hardware counter is not
                 * usable here - confirm against chip errata).  Each latched
                 * attention counts as one discard and is then acknowledged
                 * by writing the bit back.
                 */
                u32 val = tr32(HOSTCC_FLOW_ATTN);
                val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
                if (val) {
                        tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
                        sp->rx_discards.low += val;
                        if (sp->rx_discards.low < val)
                                sp->rx_discards.high += 1;
                }
                /* Mirror the value so both stats report the same events. */
                sp->mbuf_lwm_thresh_hit = sp->rx_discards;
        }
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10931
10932 static void tg3_chk_missed_msi(struct tg3 *tp)
10933 {
10934         u32 i;
10935
10936         for (i = 0; i < tp->irq_cnt; i++) {
10937                 struct tg3_napi *tnapi = &tp->napi[i];
10938
10939                 if (tg3_has_work(tnapi)) {
10940                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10941                             tnapi->last_tx_cons == tnapi->tx_cons) {
10942                                 if (tnapi->chk_msi_cnt < 1) {
10943                                         tnapi->chk_msi_cnt++;
10944                                         return;
10945                                 }
10946                                 tg3_msi(0, tnapi);
10947                         }
10948                 }
10949                 tnapi->chk_msi_cnt = 0;
10950                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10951                 tnapi->last_tx_cons = tnapi->tx_cons;
10952         }
10953 }
10954
/* Periodic maintenance timer, re-armed every tp->timer_offset jiffies.
 * Handles missed-MSI recovery, once-per-second statistics and link
 * polling, and the ASF/APE heartbeats that keep management firmware
 * from resetting the chip.
 */
static void tg3_timer(struct timer_list *t)
{
        struct tg3 *tp = from_timer(tp, t, timer);

        spin_lock(&tp->lock);

        /* While IRQs are being synchronized or a reset task is pending,
         * skip all work this tick but keep the timer alive.
         */
        if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
                spin_unlock(&tp->lock);
                goto restart_timer;
        }

        /* Chips that may lose MSIs: kick stalled vectors by hand. */
        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_flag(tp, 57765_CLASS))
                tg3_chk_missed_msi(tp);

        if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
                /* BCM4785: Flush posted writes from GbE to host memory. */
                tr32(HOSTCC_MODE);
        }

        if (!tg3_flag(tp, TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block updated: force an interrupt (SETINT)
                         * in case the original one was lost in the race.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Otherwise ask the coalescing engine to post a
                         * fresh status block now.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
                }

                /* The write DMA engine disabling itself indicates the chip
                 * has wedged; schedule a full reset outside the lock.
                 */
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        spin_unlock(&tp->lock);
                        tg3_reset_task_schedule(tp);
                        goto restart_timer;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tg3_flag(tp, 5705_PLUS))
                        tg3_periodic_fetch_stats(tp);

                /* Deferred EEE enable countdown. */
                if (tp->setlpicnt && !--tp->setlpicnt)
                        tg3_phy_eee_enable(tp);

                if (tg3_flag(tp, USE_LINKCHG_REG)) {
                        /* Poll link state via the MAC status register. */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, false);
                } else if (tg3_flag(tp, POLL_SERDES)) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link was up but state changed, or link is down
                         * and the SerDes reports sync/signal: renegotiate.
                         */
                        if (tp->link_up &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (!tp->link_up &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Drop and restore the port mode
                                         * bits before renegotiating.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, false);
                        }
                } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
                           tg3_flag(tp, 5780_CLASS)) {
                        tg3_serdes_parallel_detect(tp);
                } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
                        /* Link state is read from the CPMU status bits. */
                        u32 cpmu = tr32(TG3_CPMU_STATUS);
                        bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
                                         TG3_CPMU_STATUS_LINK_MASK);

                        if (link_up != tp->link_up)
                                tg3_setup_phy(tp, false);
                }

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
                                      TG3_FW_UPDATE_TIMEOUT_SEC);

                        tg3_generate_fw_event(tp);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        /* Update the APE heartbeat every 5 seconds. */
        tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
11098
11099 static void tg3_timer_init(struct tg3 *tp)
11100 {
11101         if (tg3_flag(tp, TAGGED_STATUS) &&
11102             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11103             !tg3_flag(tp, 57765_CLASS))
11104                 tp->timer_offset = HZ;
11105         else
11106                 tp->timer_offset = HZ / 10;
11107
11108         BUG_ON(tp->timer_offset > HZ);
11109
11110         tp->timer_multiplier = (HZ / tp->timer_offset);
11111         tp->asf_multiplier = (HZ / tp->timer_offset) *
11112                              TG3_FW_UPDATE_FREQ_SEC;
11113
11114         timer_setup(&tp->timer, tg3_timer, 0);
11115 }
11116
11117 static void tg3_timer_start(struct tg3 *tp)
11118 {
11119         tp->asf_counter   = tp->asf_multiplier;
11120         tp->timer_counter = tp->timer_multiplier;
11121
11122         tp->timer.expires = jiffies + tp->timer_offset;
11123         add_timer(&tp->timer);
11124 }
11125
/* Stop the service timer, waiting for a concurrently running handler
 * to finish before returning.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
11130
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the device is halted and closed;
 * tp->lock is dropped for the close and retaken before returning, as
 * documented by the sparse annotations below.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() must run without tp->lock held; drop the
		 * lock for the teardown and retake it before returning.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		/* Re-enable NAPI before dev_close() tears it down. */
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
11154
/* Workqueue handler that halts and fully reinitializes the chip after a
 * recoverable error (e.g. a TX timeout).  Runs in process context under
 * the RTNL lock.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	/* The device may have been brought down since the reset was
	 * scheduled; nothing to do then.
	 */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	tg3_full_unlock(tp);

	/* Stop the PHY and the data path before touching the chip.
	 * tg3_phy_stop() is called without tp->lock held.
	 */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* On TX recovery, switch the TX/RX mailbox accessors to the
	 * flushed variants and remember that write reordering was seen.
	 */
	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if reinitialization succeeded. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
	rtnl_unlock();
}
11201
11202 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11203 {
11204         irq_handler_t fn;
11205         unsigned long flags;
11206         char *name;
11207         struct tg3_napi *tnapi = &tp->napi[irq_num];
11208
11209         if (tp->irq_cnt == 1)
11210                 name = tp->dev->name;
11211         else {
11212                 name = &tnapi->irq_lbl[0];
11213                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11214                         snprintf(name, IFNAMSIZ,
11215                                  "%s-txrx-%d", tp->dev->name, irq_num);
11216                 else if (tnapi->tx_buffers)
11217                         snprintf(name, IFNAMSIZ,
11218                                  "%s-tx-%d", tp->dev->name, irq_num);
11219                 else if (tnapi->rx_rcb)
11220                         snprintf(name, IFNAMSIZ,
11221                                  "%s-rx-%d", tp->dev->name, irq_num);
11222                 else
11223                         snprintf(name, IFNAMSIZ,
11224                                  "%s-%d", tp->dev->name, irq_num);
11225                 name[IFNAMSIZ-1] = 0;
11226         }
11227
11228         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11229                 fn = tg3_msi;
11230                 if (tg3_flag(tp, 1SHOT_MSI))
11231                         fn = tg3_msi_1shot;
11232                 flags = 0;
11233         } else {
11234                 fn = tg3_interrupt;
11235                 if (tg3_flag(tp, TAGGED_STATUS))
11236                         fn = tg3_interrupt_tagged;
11237                 flags = IRQF_SHARED;
11238         }
11239
11240         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11241 }
11242
/* Verify that the device can actually deliver an interrupt.
 *
 * The regular handler on vector 0 is temporarily replaced with
 * tg3_test_isr, the coalescing engine is kicked to provoke an
 * interrupt, and the interrupt mailbox / MISC_HOST_CTRL state is polled
 * as evidence of delivery.  The regular handler is restored before
 * returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, -ENODEV when the
 * device is down, or a negative errno from re-requesting the IRQ.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick host coalescing to make the chip raise an interrupt. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to ~50 ms for signs that the ISR ran. */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A nonzero mailbox value or a masked PCI interrupt is
		 * taken as evidence that the interrupt was delivered.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* On 57765+ re-ack the last status tag so a pending
		 * update can trigger another interrupt.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Swap the regular handler back in. */
	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
11316
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * Any other failure (including a failure to fall back to INTx) is
 * returned as a negative errno.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	/* If reinit failed, release the INTx vector we just acquired. */
	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
11377
11378 static int tg3_request_firmware(struct tg3 *tp)
11379 {
11380         const struct tg3_firmware_hdr *fw_hdr;
11381
11382         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11383                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11384                            tp->fw_needed);
11385                 return -ENOENT;
11386         }
11387
11388         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11389
11390         /* Firmware blob starts with version numbers, followed by
11391          * start address and _full_ length including BSS sections
11392          * (which must be longer than the actual data, of course
11393          */
11394
11395         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11396         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11397                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11398                            tp->fw_len, tp->fw_needed);
11399                 release_firmware(tp->fw);
11400                 tp->fw = NULL;
11401                 return -EINVAL;
11402         }
11403
11404         /* We no longer need firmware; we have it. */
11405         tp->fw_needed = NULL;
11406         return 0;
11407 }
11408
11409 static u32 tg3_irq_count(struct tg3 *tp)
11410 {
11411         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11412
11413         if (irq_cnt > 1) {
11414                 /* We want as many rx rings enabled as there are cpus.
11415                  * In multiqueue MSI-X mode, the first MSI-X vector
11416                  * only deals with link interrupts, etc, so we add
11417                  * one to the number of vectors we are requesting.
11418                  */
11419                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11420         }
11421
11422         return irq_cnt;
11423 }
11424
/* Try to switch the device to MSI-X mode.
 *
 * Computes the desired rx/tx queue counts, requests a matching range of
 * MSI-X vectors and scales the queue counts back down if fewer vectors
 * were granted.  Returns true when MSI-X is enabled (setting the
 * RSS/TSS flags as appropriate), false when the caller should fall back
 * to MSI or INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	/* Honor explicit queue-count requests; otherwise default the rx
	 * count to the generic RSS heuristic, capped by the hw limit.
	 */
	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		/* Fewer vectors than requested: shrink the queue counts
		 * to match (vector 0 is reserved for link events).
		 */
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* A single vector means no multiqueue; skip RSS/TSS setup. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
11483
/* Select the interrupt mode (MSI-X, MSI, or legacy INTx) and program
 * the chip's message-interrupt mode register accordingly.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	/* Prefer MSI-X, then MSI, else stay on INTx. */
	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* Without one-shot MSI support, disable that hw mode. */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	/* Without MSI-X there is exactly one vector: the PCI line IRQ. */
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	/* One vector implies single-queue operation. */
	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
11522
11523 static void tg3_ints_fini(struct tg3 *tp)
11524 {
11525         if (tg3_flag(tp, USING_MSIX))
11526                 pci_disable_msix(tp->pdev);
11527         else if (tg3_flag(tp, USING_MSI))
11528                 pci_disable_msi(tp->pdev);
11529         tg3_flag_clear(tp, USING_MSI);
11530         tg3_flag_clear(tp, USING_MSIX);
11531         tg3_flag_clear(tp, ENABLE_RSS);
11532         tg3_flag_clear(tp, ENABLE_TSS);
11533 }
11534
/* Bring the device fully up: set up interrupts, allocate rings and
 * NAPI contexts, request IRQs, initialize the hardware, optionally run
 * the MSI self-test, then start the timer and the TX queues.
 *
 * @reset_phy: passed through to tg3_init_hw()
 * @test_irq:  run tg3_test_msi() when MSI is in use
 * @init:      notify APE firmware of driver initialization
 *
 * Returns 0 on success or a negative errno; partially acquired
 * resources are released through the out_* unwind labels.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	/* Request one IRQ per vector; free the ones already acquired
	 * if any request fails.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				struct tg3_napi *tnapi = &tp->napi[i];

				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		/* On pre-57765 chips still using MSI, enable one-shot
		 * MSI at the PCIe transaction level.
		 */
		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
11649
/* Tear the device down: cancel any pending reset work, stop NAPI, the
 * timer and the PHY, halt the chip under tp->lock, then release IRQs,
 * interrupt resources, NAPI contexts and DMA memory — the reverse of
 * tg3_start().
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Free vectors in reverse order of acquisition. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11684
/* net_device_ops ->ndo_open handler: power up the chip and bring the
 * interface online via tg3_start().
 *
 * Returns 0 on success, -EAGAIN while PCI error recovery is in
 * progress, or a negative errno from power-up / start.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	/* Fetch firmware if this chip needs it.  A load failure is fatal
	 * only on 5701 A0; on 57766 it just disables EEE, and on other
	 * chips it disables TSO.  A successful load restores the
	 * corresponding capability.
	 */
	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		/* Start failed: drop aux power and put the device in D3. */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	return err;
}
11741
11742 static int tg3_close(struct net_device *dev)
11743 {
11744         struct tg3 *tp = netdev_priv(dev);
11745
11746         if (tp->pcierr_recovery) {
11747                 netdev_err(dev, "Failed to close device. PCI error recovery "
11748                            "in progress\n");
11749                 return -EAGAIN;
11750         }
11751
11752         tg3_stop(tp);
11753
11754         if (pci_device_is_present(tp->pdev)) {
11755                 tg3_power_down_prepare(tp);
11756
11757                 tg3_carrier_off(tp);
11758         }
11759         return 0;
11760 }
11761
11762 static inline u64 get_stat64(tg3_stat64_t *val)
11763 {
11764        return ((u64)val->high << 32) | ((u64)val->low);
11765 }
11766
/* Return the cumulative rx CRC error count.
 *
 * On 5700/5701 copper devices the count is maintained in software: the
 * PHY's CRC counter is enabled via MII_TG3_TEST1 and its receive error
 * counter register is read and accumulated into tp->phy_crc_errors.
 * All other devices report the MAC's rx_fcs_errors statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;	/* PHY read failed; count nothing */

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11790
/* Accumulate one 64-bit hardware statistic on top of the snapshot saved
 * across the last chip reset.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill @estats with running totals: the pre-reset snapshot held in
 * tp->estats_prev plus the live hardware counters in tp->hw_stats.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11878
/* Fill @stats by mapping the tg3 hardware counters onto the generic
 * rtnl_link_stats64 fields, on top of the snapshot saved across the
 * last reset (tp->net_stats_prev).
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Drop counters are maintained in software by the driver. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11932
/* ethtool get_regs_len: the register dump buffer always spans the
 * full legacy register block.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11937
11938 static void tg3_get_regs(struct net_device *dev,
11939                 struct ethtool_regs *regs, void *_p)
11940 {
11941         struct tg3 *tp = netdev_priv(dev);
11942
11943         regs->version = 0;
11944
11945         memset(_p, 0, TG3_REG_BLK_SIZE);
11946
11947         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11948                 return;
11949
11950         tg3_full_lock(tp, 0);
11951
11952         tg3_dump_legacy_regs(tp, (u32 *)_p);
11953
11954         tg3_full_unlock(tp);
11955 }
11956
11957 static int tg3_get_eeprom_len(struct net_device *dev)
11958 {
11959         struct tg3 *tp = netdev_priv(dev);
11960
11961         return tp->nvram_size;
11962 }
11963
/* ethtool get_eeprom: read 'eeprom->len' bytes of NVRAM starting at
 * 'eeprom->offset' into 'data'.  NVRAM is accessed in big-endian
 * 32-bit words, so an unaligned head or tail is handled by reading
 * the containing word and copying out only the requested slice.
 * 'eeprom->len' is updated to the number of bytes actually copied.
 * Returns 0 on success or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;	/* running count of bytes copied so far */

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					     CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* only count the words completed before the error */
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		/* Large reads can take a while: stay preemptible, and
		 * bail out early if the caller was interrupted.
		 */
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}
12054
/* ethtool set_eeprom: write 'eeprom->len' bytes from 'data' to NVRAM
 * at 'eeprom->offset'.  NVRAM writes must be 4-byte aligned, so an
 * unaligned head or tail is widened to whole words by first reading
 * back the straddling words (read-modify-write into a scratch buffer).
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start = 0, end;

	/* Refuse writes when there is no NVRAM or the caller did not
	 * echo back the magic value reported by get_eeprom.
	 */
	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Build an aligned scratch copy: preserved head word,
		 * the caller's payload, preserved tail word.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
12110
/* ethtool get_link_ksettings: report supported and advertised link
 * modes, the active speed/duplex and MDI-X state.  When the PHY is
 * managed by phylib, defer entirely to the attached phy_device.
 */
static int tg3_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 supported, advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		phy_ethtool_ksettings_get(phydev, cmd);

		return 0;
	}

	supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		supported |= (SUPPORTED_1000baseT_Half |
			      SUPPORTED_1000baseT_Full);

	/* Copper parts also do 10/100 over twisted pair; SerDes parts
	 * are reported as fibre.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		supported |= (SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_TP);
		cmd->base.port = PORT_TP;
	} else {
		supported |= SUPPORTED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the rx/tx flow control configuration onto the
		 * Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				advertising |= ADVERTISED_Pause;
			} else {
				advertising |= ADVERTISED_Pause |
					ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			advertising |= ADVERTISED_Asym_Pause;
		}
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	if (netif_running(dev) && tp->link_up) {
		cmd->base.speed = tp->link_config.active_speed;
		cmd->base.duplex = tp->link_config.active_duplex;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising,
			tp->link_config.rmt_adv);

		/* MDI-X state is only meaningful on copper links. */
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* Link down or interface closed: nothing meaningful to
		 * report for speed/duplex.
		 */
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->base.phy_address = tp->phy_addr;
	cmd->base.autoneg = tp->link_config.autoneg;
	return 0;
}
12185
/* ethtool set_link_ksettings: validate and apply a new link
 * configuration (autoneg on/off, advertised modes, or forced
 * speed/duplex), then re-run PHY setup if the interface is up.
 * Returns 0 on success or a negative errno for invalid requests.
 */
static int tg3_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	/* phylib-managed PHYs: let phylib validate and do the work. */
	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_set(phydev, cmd);
	}

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* A forced link needs an explicit duplex setting. */
	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    cmd->base.duplex != DUPLEX_FULL &&
	    cmd->base.duplex != DUPLEX_HALF)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		/* Build the mask of everything this PHY can advertise... */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* ...and reject requests for anything outside it. */
		if (advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits of the request. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SerDes links can only be forced to 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->base.duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			/* Copper links can only be forced to 10 or 100. */
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->base.autoneg;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->base.duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
12281
12282 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12283 {
12284         struct tg3 *tp = netdev_priv(dev);
12285
12286         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12287         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12288         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12289         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12290 }
12291
12292 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12293 {
12294         struct tg3 *tp = netdev_priv(dev);
12295
12296         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12297                 wol->supported = WAKE_MAGIC;
12298         else
12299                 wol->supported = 0;
12300         wol->wolopts = 0;
12301         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12302                 wol->wolopts = WAKE_MAGIC;
12303         memset(&wol->sopass, 0, sizeof(wol->sopass));
12304 }
12305
12306 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12307 {
12308         struct tg3 *tp = netdev_priv(dev);
12309         struct device *dp = &tp->pdev->dev;
12310
12311         if (wol->wolopts & ~WAKE_MAGIC)
12312                 return -EINVAL;
12313         if ((wol->wolopts & WAKE_MAGIC) &&
12314             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12315                 return -EINVAL;
12316
12317         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12318
12319         if (device_may_wakeup(dp))
12320                 tg3_flag_set(tp, WOL_ENABLE);
12321         else
12322                 tg3_flag_clear(tp, WOL_ENABLE);
12323
12324         return 0;
12325 }
12326
12327 static u32 tg3_get_msglevel(struct net_device *dev)
12328 {
12329         struct tg3 *tp = netdev_priv(dev);
12330         return tp->msg_enable;
12331 }
12332
12333 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12334 {
12335         struct tg3 *tp = netdev_priv(dev);
12336         tp->msg_enable = value;
12337 }
12338
/* ethtool nway_reset: restart link autonegotiation.
 * Returns 0 on success, -EAGAIN if the interface is down (or the
 * phylib PHY is not attached yet), -EINVAL if renegotiation is not
 * possible (SerDes PHY, or autoneg disabled in BMCR).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice; the first read looks
		 * like a dummy read to flush latched PHY state — confirm
		 * against the PHY errata.  Only the second read's value
		 * is used.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
12374
12375 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12376 {
12377         struct tg3 *tp = netdev_priv(dev);
12378
12379         ering->rx_max_pending = tp->rx_std_ring_mask;
12380         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12381                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12382         else
12383                 ering->rx_jumbo_max_pending = 0;
12384
12385         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12386
12387         ering->rx_pending = tp->rx_pending;
12388         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12389                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12390         else
12391                 ering->rx_jumbo_pending = 0;
12392
12393         ering->tx_pending = tp->napi[0].tx_pending;
12394 }
12395
/* ethtool set_ringparam: resize the RX (std/jumbo) and TX rings.
 * If the interface is running, it is stopped, the hardware reset,
 * and restarted with the new sizes.  Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* Reject sizes beyond the hardware masks, and TX rings too
	 * small to hold a maximally fragmented skb (three of them on
	 * TSO_BUG parts, where the driver may segment packets itself).
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some parts can post at most 64 standard RX descriptors. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* Every TX queue uses the same ring size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, false);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
12443
12444 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12445 {
12446         struct tg3 *tp = netdev_priv(dev);
12447
12448         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12449
12450         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12451                 epause->rx_pause = 1;
12452         else
12453                 epause->rx_pause = 0;
12454
12455         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12456                 epause->tx_pause = 1;
12457         else
12458                 epause->tx_pause = 0;
12459 }
12460
/* ethtool set_pauseparam: configure RX/TX flow control and whether it
 * is autonegotiated.  With phylib, the change is folded into the PHY
 * advertisement and a renegotiation is kicked off if needed;
 * otherwise the MAC is reset and reprogrammed directly.
 * Returns 0 or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

		/* Asymmetric pause requires PHY support; without it,
		 * rx and tx settings must match.
		 */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate rx/tx pause into advertisement bits. */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet: just record the desired
			 * advertisement for when it connects.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Apply by resetting and reprogramming the hardware. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12567
12568 static int tg3_get_sset_count(struct net_device *dev, int sset)
12569 {
12570         switch (sset) {
12571         case ETH_SS_TEST:
12572                 return TG3_NUM_TEST;
12573         case ETH_SS_STATS:
12574                 return TG3_NUM_STATS;
12575         default:
12576                 return -EOPNOTSUPP;
12577         }
12578 }
12579
12580 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12581                          u32 *rules __always_unused)
12582 {
12583         struct tg3 *tp = netdev_priv(dev);
12584
12585         if (!tg3_flag(tp, SUPPORT_MSIX))
12586                 return -EOPNOTSUPP;
12587
12588         switch (info->cmd) {
12589         case ETHTOOL_GRXRINGS:
12590                 if (netif_running(tp->dev))
12591                         info->data = tp->rxq_cnt;
12592                 else {
12593                         info->data = num_online_cpus();
12594                         if (info->data > TG3_RSS_MAX_NUM_QS)
12595                                 info->data = TG3_RSS_MAX_NUM_QS;
12596                 }
12597
12598                 return 0;
12599
12600         default:
12601                 return -EOPNOTSUPP;
12602         }
12603 }
12604
12605 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12606 {
12607         u32 size = 0;
12608         struct tg3 *tp = netdev_priv(dev);
12609
12610         if (tg3_flag(tp, SUPPORT_MSIX))
12611                 size = TG3_RSS_INDIR_TBL_SIZE;
12612
12613         return size;
12614 }
12615
12616 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12617 {
12618         struct tg3 *tp = netdev_priv(dev);
12619         int i;
12620
12621         if (hfunc)
12622                 *hfunc = ETH_RSS_HASH_TOP;
12623         if (!indir)
12624                 return 0;
12625
12626         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12627                 indir[i] = tp->rss_ind_tbl[i];
12628
12629         return 0;
12630 }
12631
12632 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12633                         const u8 hfunc)
12634 {
12635         struct tg3 *tp = netdev_priv(dev);
12636         size_t i;
12637
12638         /* We require at least one supported parameter to be changed and no
12639          * change in any of the unsupported parameters
12640          */
12641         if (key ||
12642             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12643                 return -EOPNOTSUPP;
12644
12645         if (!indir)
12646                 return 0;
12647
12648         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12649                 tp->rss_ind_tbl[i] = indir[i];
12650
12651         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12652                 return 0;
12653
12654         /* It is legal to write the indirection
12655          * table while the device is running.
12656          */
12657         tg3_full_lock(tp, 0);
12658         tg3_rss_write_indir_tbl(tp);
12659         tg3_full_unlock(tp);
12660
12661         return 0;
12662 }
12663
12664 static void tg3_get_channels(struct net_device *dev,
12665                              struct ethtool_channels *channel)
12666 {
12667         struct tg3 *tp = netdev_priv(dev);
12668         u32 deflt_qs = netif_get_num_default_rss_queues();
12669
12670         channel->max_rx = tp->rxq_max;
12671         channel->max_tx = tp->txq_max;
12672
12673         if (netif_running(dev)) {
12674                 channel->rx_count = tp->rxq_cnt;
12675                 channel->tx_count = tp->txq_cnt;
12676         } else {
12677                 if (tp->rxq_req)
12678                         channel->rx_count = tp->rxq_req;
12679                 else
12680                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12681
12682                 if (tp->txq_req)
12683                         channel->tx_count = tp->txq_req;
12684                 else
12685                         channel->tx_count = min(deflt_qs, tp->txq_max);
12686         }
12687 }
12688
12689 static int tg3_set_channels(struct net_device *dev,
12690                             struct ethtool_channels *channel)
12691 {
12692         struct tg3 *tp = netdev_priv(dev);
12693
12694         if (!tg3_flag(tp, SUPPORT_MSIX))
12695                 return -EOPNOTSUPP;
12696
12697         if (channel->rx_count > tp->rxq_max ||
12698             channel->tx_count > tp->txq_max)
12699                 return -EINVAL;
12700
12701         tp->rxq_req = channel->rx_count;
12702         tp->txq_req = channel->tx_count;
12703
12704         if (!netif_running(dev))
12705                 return 0;
12706
12707         tg3_stop(tp);
12708
12709         tg3_carrier_off(tp);
12710
12711         tg3_start(tp, true, false, false);
12712
12713         return 0;
12714 }
12715
12716 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12717 {
12718         switch (stringset) {
12719         case ETH_SS_STATS:
12720                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12721                 break;
12722         case ETH_SS_TEST:
12723                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12724                 break;
12725         default:
12726                 WARN_ON(1);     /* we need a WARN() */
12727                 break;
12728         }
12729 }
12730
/* ethtool set_phys_id: blink the port LEDs so a user can physically
 * locate the NIC.  Returning 1 from ETHTOOL_ID_ACTIVE asks the core
 * to call back with ON/OFF once per second; INACTIVE restores the
 * LED mode chosen at probe time.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override every speed/traffic LED on. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Override all LEDs off. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the normal LED control value. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
12765
12766 static void tg3_get_ethtool_stats(struct net_device *dev,
12767                                    struct ethtool_stats *estats, u64 *tmp_stats)
12768 {
12769         struct tg3 *tp = netdev_priv(dev);
12770
12771         if (tp->hw_stats)
12772                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12773         else
12774                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12775 }
12776
/* Read the device's VPD (Vital Product Data) block.
 *
 * For EEPROM-magic images the NVRAM directory is scanned for an
 * extended-VPD entry; failing that (or for other image types) the
 * default NVRAM VPD offset/length is used.  Non-EEPROM images are read
 * through the PCI VPD capability instead of the NVRAM interface.
 *
 * Returns a kmalloc()ed buffer that the caller must kfree(), with the
 * byte length stored in *vpdlen, or NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
        int i;
        __be32 *buf;
        u32 offset = 0, len = 0;
        u32 magic, val;

        if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                /* Walk the NVRAM directory looking for an extended-VPD
                 * entry that points at the real VPD location.
                 */
                for (offset = TG3_NVM_DIR_START;
                     offset < TG3_NVM_DIR_END;
                     offset += TG3_NVM_DIRENT_SIZE) {
                        if (tg3_nvram_read(tp, offset, &val))
                                return NULL;

                        if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
                            TG3_NVM_DIRTYPE_EXTVPD)
                                break;
                }

                if (offset != TG3_NVM_DIR_END) {
                        /* Entry found: length field is in 32-bit words;
                         * the location lives in the following word.
                         */
                        len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
                        if (tg3_nvram_read(tp, offset + 4, &offset))
                                return NULL;

                        offset = tg3_nvram_logical_addr(tp, offset);
                }
        }

        if (!offset || !len) {
                /* No usable directory entry: fall back to the default
                 * VPD window in NVRAM.
                 */
                offset = TG3_NVM_VPD_OFF;
                len = TG3_NVM_VPD_LEN;
        }

        buf = kmalloc(len, GFP_KERNEL);
        if (buf == NULL)
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                for (i = 0; i < len; i += 4) {
                        /* The data is in little-endian format in NVRAM.
                         * Use the big-endian read routines to preserve
                         * the byte order as it exists in NVRAM.
                         */
                        if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
                                goto error;
                }
        } else {
                u8 *ptr;
                ssize_t cnt;
                unsigned int pos = 0;

                /* Non-EEPROM image: read through the PCI VPD capability.
                 * Timeouts/interrupts are retried (up to 3 passes);
                 * other errors abort.
                 */
                ptr = (u8 *)&buf[0];
                for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
                        cnt = pci_read_vpd(tp->pdev, pos,
                                           len - pos, ptr);
                        if (cnt == -ETIMEDOUT || cnt == -EINTR)
                                cnt = 0;
                        else if (cnt < 0)
                                goto error;
                }
                if (pos != len)
                        goto error;
        }

        *vpdlen = len;

        return buf;

error:
        kfree(buf);
        return NULL;
}
12852
12853 #define NVRAM_TEST_SIZE 0x100
12854 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12855 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12856 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12857 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12858 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12859 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12860 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12861 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12862
/* 'nvram' ethtool self test: validate the checksum of the NVRAM image.
 *
 * Three image layouts are handled:
 *  - standard EEPROM images: CRC over the bootstrap block (checksum at
 *    offset 0x10) and the manufacturing block (checksum at 0xfc), plus
 *    the checksum keyword in the VPD read-only section;
 *  - selfboot format-1 firmware images: 8-bit byte-sum must be zero;
 *  - hardware selfboot images: per-byte odd-parity bits.
 *
 * Returns 0 when the image checks out (or there is nothing to test),
 * -EIO on a checksum/parity mismatch or read error, -ENOMEM on
 * allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic, len;
        __be32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_flag(tp, NO_NVRAM))
                return 0;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return -EIO;

        /* The magic word at offset 0 identifies the layout and thus how
         * many bytes the checksum covers.
         */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        /* Selfboot format 1: size depends on revision. */
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_4:
                                size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_5:
                                size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_6:
                                size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
                                break;
                        default:
                                return -EIO;
                        }
                } else
                        /* Unrecognized selfboot format: nothing to test. */
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Read the whole checksummed region into buf. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                err = tg3_nvram_read_be32(tp, i, &buf[j]);
                if (err)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = be32_to_cpu(buf[0]);
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                /* A valid image's bytes sum to zero (mod 256). */
                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                /* Bytes 0 and 8 each carry 7 parity bits
                                 * (MSB first).
                                 */
                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        } else if (i == 16) {
                                int l;
                                u8 msk;

                                /* Bytes 16 and 17 carry 6 + 8 more
                                 * parity bits.
                                 */
                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                /* Each data byte plus its parity bit must have odd
                 * combined parity.
                 */
                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        err = -EIO;

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if (csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;

        kfree(buf);

        /* Finally verify the checksum keyword in the VPD read-only
         * section, when one is present.
         */
        buf = tg3_vpd_readblock(tp, &len);
        if (!buf)
                return -ENOMEM;

        i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
        if (i > 0) {
                j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
                if (j < 0)
                        goto out;

                /* The RO section must lie entirely inside the buffer. */
                if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
                        goto out;

                i += PCI_VPD_LRDT_TAG_SIZE;
                j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
                                              PCI_VPD_RO_KEYWORD_CHKSUM);
                if (j > 0) {
                        u8 csum8 = 0;

                        j += PCI_VPD_INFO_FLD_HDR_SIZE;

                        /* Bytes 0..j inclusive must sum to zero. */
                        for (i = 0; i <= j; i++)
                                csum8 += ((u8 *)buf)[i];

                        if (csum8)
                                goto out;
                }
        }

        err = 0;

out:
        kfree(buf);
        return err;
}
13041
13042 #define TG3_SERDES_TIMEOUT_SEC  2
13043 #define TG3_COPPER_TIMEOUT_SEC  6
13044
13045 static int tg3_test_link(struct tg3 *tp)
13046 {
13047         int i, max;
13048
13049         if (!netif_running(tp->dev))
13050                 return -ENODEV;
13051
13052         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13053                 max = TG3_SERDES_TIMEOUT_SEC;
13054         else
13055                 max = TG3_COPPER_TIMEOUT_SEC;
13056
13057         for (i = 0; i < max; i++) {
13058                 if (tp->link_up)
13059                         return 0;
13060
13061                 if (msleep_interruptible(1000))
13062                         break;
13063         }
13064
13065         return -EIO;
13066 }
13067
/* 'registers' ethtool self test — only the commonly used registers are
 * tested.
 *
 * For each reg_tbl entry that applies to this chip, the register is
 * saved, written with all-zeros and then with all-ones, and checked:
 * bits covered by read_mask must never change, bits covered by
 * write_mask must take the written value.  The saved value is restored
 * in every case.  Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* Table terminator. */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        /* Classify the chip so per-entry flags can be applied. */
        is_5705 = is_5750 = 0;
        if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
                if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that do not apply to this chip family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                tw32(offset, save_val);
        }

        return 0;

out:
        /* Mismatch: report (if enabled), restore the register, fail. */
        if (netif_msg_hw(tp))
                netdev_err(tp->dev,
                           "Register test failed at offset %x\n", offset);
        tw32(offset, save_val);
        return -EIO;
}
13288
13289 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13290 {
13291         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13292         int i;
13293         u32 j;
13294
13295         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13296                 for (j = 0; j < len; j += 4) {
13297                         u32 val;
13298
13299                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13300                         tg3_read_mem(tp, offset + j, &val);
13301                         if (val != test_pattern[i])
13302                                 return -EIO;
13303                 }
13304         }
13305         return 0;
13306 }
13307
/* 'memory' ethtool self test: pattern-test regions of the NIC's internal
 * memory.
 *
 * Selects the { offset, len } region table matching the chip family
 * (each table is terminated by offset 0xffffffff) and runs
 * tg3_do_mem_test() over every region.  Returns 0 on success or the
 * first non-zero tg3_do_mem_test() result.
 */
static int tg3_test_memory(struct tg3 *tp)
{
        static struct mem_entry {
                u32 offset;
                u32 len;
        } mem_tbl_570x[] = {
                { 0x00000000, 0x00b50},
                { 0x00002000, 0x1c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5705[] = {
                { 0x00000100, 0x0000c},
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x01000},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0e000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5755[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x00800},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5906[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00400},
                { 0x00006000, 0x00400},
                { 0x00008000, 0x01000},
                { 0x00010000, 0x01000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5717[] = {
                { 0x00000200, 0x00008},
                { 0x00010000, 0x0a000},
                { 0x00020000, 0x13c00},
                { 0xffffffff, 0x00000}
        }, mem_tbl_57765[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x09800},
                { 0x00010000, 0x0a000},
                { 0xffffffff, 0x00000}
        };
        struct mem_entry *mem_tbl;
        int err = 0;
        int i;

        /* Pick the region table for this chip; the more specific family
         * checks come before the generic *_PLUS fallbacks.
         */
        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (tg3_flag(tp, 57765_CLASS) ||
                 tg3_asic_rev(tp) == ASIC_REV_5762)
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
        else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
        else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
        else
                mem_tbl = mem_tbl_570x;

        /* Stop at the first failing region. */
        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
                if (err)
                        break;
        }

        return err;
}
13377
13378 #define TG3_TSO_MSS             500
13379
13380 #define TG3_TSO_IP_HDR_LEN      20
13381 #define TG3_TSO_TCP_HDR_LEN     20
13382 #define TG3_TSO_TCP_OPT_LEN     12
13383
/* Canned ethertype + IPv4 + TCP header template for the TSO loopback
 * self test; copied into the test frame right after the two MAC
 * addresses.  The IP total-length field is patched at transmit time,
 * and the TCP checksum is zeroed for HW-TSO chips.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,                     /* ethertype: IPv4 (0x0800) */
0x45, 0x00, 0x00, 0x00,         /* IPv4, IHL=5; tot_len filled in later */
0x00, 0x00, 0x40, 0x00,         /* id 0; don't-fragment flag */
0x40, 0x06, 0x00, 0x00,         /* TTL 64, protocol TCP, hdr csum 0 */
0x0a, 0x00, 0x00, 0x01,         /* source IP 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,         /* dest IP 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,         /* TCP source/dest ports */
0x00, 0x00, 0x01, 0x00,         /* sequence number */
0x00, 0x00, 0x02, 0x00,         /* ack number */
0x80, 0x10, 0x10, 0x00,         /* data offset 8 words (20B hdr + 12B
                                 * options), flags, window */
0x14, 0x09, 0x00, 0x00,         /* TCP checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,         /* options: NOP, NOP, timestamp */
0x11, 0x11, 0x11, 0x11,         /* timestamp value (dummy) */
0x11, 0x11, 0x11, 0x11,         /* timestamp echo (dummy) */
};
13400
13401 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13402 {
13403         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13404         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13405         u32 budget;
13406         struct sk_buff *skb;
13407         u8 *tx_data, *rx_data;
13408         dma_addr_t map;
13409         int num_pkts, tx_len, rx_len, i, err;
13410         struct tg3_rx_buffer_desc *desc;
13411         struct tg3_napi *tnapi, *rnapi;
13412         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13413
13414         tnapi = &tp->napi[0];
13415         rnapi = &tp->napi[0];
13416         if (tp->irq_cnt > 1) {
13417                 if (tg3_flag(tp, ENABLE_RSS))
13418                         rnapi = &tp->napi[1];
13419                 if (tg3_flag(tp, ENABLE_TSS))
13420                         tnapi = &tp->napi[1];
13421         }
13422         coal_now = tnapi->coal_now | rnapi->coal_now;
13423
13424         err = -EIO;
13425
13426         tx_len = pktsz;
13427         skb = netdev_alloc_skb(tp->dev, tx_len);
13428         if (!skb)
13429                 return -ENOMEM;
13430
13431         tx_data = skb_put(skb, tx_len);
13432         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13433         memset(tx_data + ETH_ALEN, 0x0, 8);
13434
13435         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13436
13437         if (tso_loopback) {
13438                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13439
13440                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13441                               TG3_TSO_TCP_OPT_LEN;
13442
13443                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13444                        sizeof(tg3_tso_header));
13445                 mss = TG3_TSO_MSS;
13446
13447                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13448                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13449
13450                 /* Set the total length field in the IP header */
13451                 iph->tot_len = htons((u16)(mss + hdr_len));
13452
13453                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13454                               TXD_FLAG_CPU_POST_DMA);
13455
13456                 if (tg3_flag(tp, HW_TSO_1) ||
13457                     tg3_flag(tp, HW_TSO_2) ||
13458                     tg3_flag(tp, HW_TSO_3)) {
13459                         struct tcphdr *th;
13460                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13461                         th = (struct tcphdr *)&tx_data[val];
13462                         th->check = 0;
13463                 } else
13464                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13465
13466                 if (tg3_flag(tp, HW_TSO_3)) {
13467                         mss |= (hdr_len & 0xc) << 12;
13468                         if (hdr_len & 0x10)
13469                                 base_flags |= 0x00000010;
13470                         base_flags |= (hdr_len & 0x3e0) << 5;
13471                 } else if (tg3_flag(tp, HW_TSO_2))
13472                         mss |= hdr_len << 9;
13473                 else if (tg3_flag(tp, HW_TSO_1) ||
13474                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13475                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13476                 } else {
13477                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13478                 }
13479
13480                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13481         } else {
13482                 num_pkts = 1;
13483                 data_off = ETH_HLEN;
13484
13485                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13486                     tx_len > VLAN_ETH_FRAME_LEN)
13487                         base_flags |= TXD_FLAG_JMB_PKT;
13488         }
13489
13490         for (i = data_off; i < tx_len; i++)
13491                 tx_data[i] = (u8) (i & 0xff);
13492
13493         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13494         if (pci_dma_mapping_error(tp->pdev, map)) {
13495                 dev_kfree_skb(skb);
13496                 return -EIO;
13497         }
13498
13499         val = tnapi->tx_prod;
13500         tnapi->tx_buffers[val].skb = skb;
13501         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13502
13503         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13504                rnapi->coal_now);
13505
13506         udelay(10);
13507
13508         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13509
13510         budget = tg3_tx_avail(tnapi);
13511         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13512                             base_flags | TXD_FLAG_END, mss, 0)) {
13513                 tnapi->tx_buffers[val].skb = NULL;
13514                 dev_kfree_skb(skb);
13515                 return -EIO;
13516         }
13517
13518         tnapi->tx_prod++;
13519
13520         /* Sync BD data before updating mailbox */
13521         wmb();
13522
13523         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13524         tr32_mailbox(tnapi->prodmbox);
13525
13526         udelay(10);
13527
13528         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13529         for (i = 0; i < 35; i++) {
13530                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13531                        coal_now);
13532
13533                 udelay(10);
13534
13535                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13536                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13537                 if ((tx_idx == tnapi->tx_prod) &&
13538                     (rx_idx == (rx_start_idx + num_pkts)))
13539                         break;
13540         }
13541
13542         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13543         dev_kfree_skb(skb);
13544
13545         if (tx_idx != tnapi->tx_prod)
13546                 goto out;
13547
13548         if (rx_idx != rx_start_idx + num_pkts)
13549                 goto out;
13550
13551         val = data_off;
13552         while (rx_idx != rx_start_idx) {
13553                 desc = &rnapi->rx_rcb[rx_start_idx++];
13554                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13555                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13556
13557                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13558                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13559                         goto out;
13560
13561                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13562                          - ETH_FCS_LEN;
13563
13564                 if (!tso_loopback) {
13565                         if (rx_len != tx_len)
13566                                 goto out;
13567
13568                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13569                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13570                                         goto out;
13571                         } else {
13572                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13573                                         goto out;
13574                         }
13575                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13576                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13577                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13578                         goto out;
13579                 }
13580
13581                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13582                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13583                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13584                                              mapping);
13585                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13586                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13587                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13588                                              mapping);
13589                 } else
13590                         goto out;
13591
13592                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13593                                             PCI_DMA_FROMDEVICE);
13594
13595                 rx_data += TG3_RX_OFFSET(tp);
13596                 for (i = data_off; i < rx_len; i++, val++) {
13597                         if (*(rx_data + i) != (u8) (val & 0xff))
13598                                 goto out;
13599                 }
13600         }
13601
13602         err = 0;
13603
13604         /* tg3_free_rings will unmap and free the rx_data */
13605 out:
13606         return err;
13607 }
13608
13609 #define TG3_STD_LOOPBACK_FAILED         1
13610 #define TG3_JMB_LOOPBACK_FAILED         2
13611 #define TG3_TSO_LOOPBACK_FAILED         4
13612 #define TG3_LOOPBACK_FAILED \
13613         (TG3_STD_LOOPBACK_FAILED | \
13614          TG3_JMB_LOOPBACK_FAILED | \
13615          TG3_TSO_LOOPBACK_FAILED)
13616
/* Run the MAC-, PHY- and (optionally) external-cable loopback tests.
 *
 * @tp:         device private state
 * @data:       result slots; the TG3_{MAC,PHY,EXT}_LOOPB_TEST entries get
 *              TG3_{STD,JMB,TSO}_LOOPBACK_FAILED bits OR'ed in on failure
 * @do_extlpbk: also exercise the external loopback variants
 *
 * Returns 0 if every loopback that ran passed, -EIO otherwise.  Called
 * from tg3_self_test() with tg3_full_lock() held and @data zeroed.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test frame to the device's DMA limit, if any. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Mask off EEE support for the duration of the test; the saved
	 * capability bit is restored at "done".
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	/* Loopback needs a running interface; otherwise report all failed. */
	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* NOTE(review): data[TG3_EXT_LOOPB_TEST] is read even when the
	 * external loopback did not run; this relies on the caller having
	 * zeroed @data beforehand (tg3_self_test() does).
	 */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13731
/* ethtool self-test entry point (ethtool_ops.self_test).
 *
 * Runs the NVRAM, link, register, memory, loopback and interrupt tests.
 * Each failing test sets its slot in @data to 1 (loopback fills its own
 * slots) and ETH_TEST_FL_FAILED in etest->flags.  The offline tests halt
 * the chip and restart it afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Power the chip up first if it was put in a low-power state. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			/* Mark every result non-zero (each byte set to 1). */
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* External loopback implies no link partner; skip the link test. */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before taking the chip down. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		/* Only unlock NVRAM if we successfully locked it above. */
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Runs with the full lock dropped. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		/* Restart the PHY only if we stopped it and HW came back. */
		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);

}
13820
13821 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13822 {
13823         struct tg3 *tp = netdev_priv(dev);
13824         struct hwtstamp_config stmpconf;
13825
13826         if (!tg3_flag(tp, PTP_CAPABLE))
13827                 return -EOPNOTSUPP;
13828
13829         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13830                 return -EFAULT;
13831
13832         if (stmpconf.flags)
13833                 return -EINVAL;
13834
13835         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13836             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13837                 return -ERANGE;
13838
13839         switch (stmpconf.rx_filter) {
13840         case HWTSTAMP_FILTER_NONE:
13841                 tp->rxptpctl = 0;
13842                 break;
13843         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13844                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13845                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13846                 break;
13847         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13848                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13849                                TG3_RX_PTP_CTL_SYNC_EVNT;
13850                 break;
13851         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13852                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13853                                TG3_RX_PTP_CTL_DELAY_REQ;
13854                 break;
13855         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13856                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13857                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13858                 break;
13859         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13860                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13861                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13862                 break;
13863         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13864                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13865                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13866                 break;
13867         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13868                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13869                                TG3_RX_PTP_CTL_SYNC_EVNT;
13870                 break;
13871         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13872                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13873                                TG3_RX_PTP_CTL_SYNC_EVNT;
13874                 break;
13875         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13876                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13877                                TG3_RX_PTP_CTL_SYNC_EVNT;
13878                 break;
13879         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13880                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13881                                TG3_RX_PTP_CTL_DELAY_REQ;
13882                 break;
13883         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13884                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13885                                TG3_RX_PTP_CTL_DELAY_REQ;
13886                 break;
13887         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13888                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13889                                TG3_RX_PTP_CTL_DELAY_REQ;
13890                 break;
13891         default:
13892                 return -ERANGE;
13893         }
13894
13895         if (netif_running(dev) && tp->rxptpctl)
13896                 tw32(TG3_RX_PTP_CTL,
13897                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13898
13899         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13900                 tg3_flag_set(tp, TX_TSTAMP_EN);
13901         else
13902                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13903
13904         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13905                 -EFAULT : 0;
13906 }
13907
13908 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13909 {
13910         struct tg3 *tp = netdev_priv(dev);
13911         struct hwtstamp_config stmpconf;
13912
13913         if (!tg3_flag(tp, PTP_CAPABLE))
13914                 return -EOPNOTSUPP;
13915
13916         stmpconf.flags = 0;
13917         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13918                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13919
13920         switch (tp->rxptpctl) {
13921         case 0:
13922                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13923                 break;
13924         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13925                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13926                 break;
13927         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13928                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13929                 break;
13930         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13931                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13932                 break;
13933         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13934                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13935                 break;
13936         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13937                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13938                 break;
13939         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13940                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13941                 break;
13942         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13943                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13944                 break;
13945         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13946                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13947                 break;
13948         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13949                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13950                 break;
13951         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13952                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13953                 break;
13954         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13955                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13956                 break;
13957         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13958                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13959                 break;
13960         default:
13961                 WARN_ON_ONCE(1);
13962                 return -ERANGE;
13963         }
13964
13965         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13966                 -EFAULT : 0;
13967 }
13968
/* ndo_do_ioctl handler: MII register access and HW timestamp control.
 *
 * When phylib manages the PHY, everything is forwarded to the PHY
 * device.  Otherwise SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are serviced
 * with direct PHY register reads/writes under tp->lock; SIOC[SG]HWTSTAMP
 * are delegated to the tg3_hwtstamp_* helpers.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		/* val_out is copied back even if the read failed. */
		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
14033
14034 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14035 {
14036         struct tg3 *tp = netdev_priv(dev);
14037
14038         memcpy(ec, &tp->coal, sizeof(*ec));
14039         return 0;
14040 }
14041
14042 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14043 {
14044         struct tg3 *tp = netdev_priv(dev);
14045         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14046         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14047
14048         if (!tg3_flag(tp, 5705_PLUS)) {
14049                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14050                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14051                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14052                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14053         }
14054
14055         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14056             (!ec->rx_coalesce_usecs) ||
14057             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14058             (!ec->tx_coalesce_usecs) ||
14059             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14060             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14061             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14062             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14063             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14064             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14065             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14066             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14067                 return -EINVAL;
14068
14069         /* Only copy relevant parameters, ignore all others. */
14070         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14071         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14072         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14073         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14074         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14075         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14076         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14077         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14078         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14079
14080         if (netif_running(dev)) {
14081                 tg3_full_lock(tp, 0);
14082                 __tg3_set_coalesce(tp, &tp->coal);
14083                 tg3_full_unlock(tp);
14084         }
14085         return 0;
14086 }
14087
/* ethtool set_eee: apply user-requested Energy Efficient Ethernet
 * settings.  Direct manipulation of the advertised modes is rejected;
 * the TX LPI timer is bounded by TG3_CPMU_DBTMR1_LNKIDLE_MAX.  Applying
 * the new settings on a running interface resets the PHY, so the link
 * will flap.
 */
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	/* Remember the user changed things, and warn about link flap. */
	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}
14124
14125 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14126 {
14127         struct tg3 *tp = netdev_priv(dev);
14128
14129         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14130                 netdev_warn(tp->dev,
14131                             "Board does not support EEE!\n");
14132                 return -EOPNOTSUPP;
14133         }
14134
14135         *edata = tp->eee;
14136         return 0;
14137 }
14138
/* ethtool callback table for tg3 devices. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
14175
14176 static void tg3_get_stats64(struct net_device *dev,
14177                             struct rtnl_link_stats64 *stats)
14178 {
14179         struct tg3 *tp = netdev_priv(dev);
14180
14181         spin_lock_bh(&tp->lock);
14182         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14183                 *stats = tp->net_stats_prev;
14184                 spin_unlock_bh(&tp->lock);
14185                 return;
14186         }
14187
14188         tg3_get_nstats(tp, stats);
14189         spin_unlock_bh(&tp->lock);
14190 }
14191
/* ndo_set_rx_mode: push the new RX filtering configuration to the chip.
 * A no-op while the interface is down; the settings are applied on open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
14203
14204 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14205                                int new_mtu)
14206 {
14207         dev->mtu = new_mtu;
14208
14209         if (new_mtu > ETH_DATA_LEN) {
14210                 if (tg3_flag(tp, 5780_CLASS)) {
14211                         netdev_update_features(dev);
14212                         tg3_flag_clear(tp, TSO_CAPABLE);
14213                 } else {
14214                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14215                 }
14216         } else {
14217                 if (tg3_flag(tp, 5780_CLASS)) {
14218                         tg3_flag_set(tp, TSO_CAPABLE);
14219                         netdev_update_features(dev);
14220                 }
14221                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14222         }
14223 }
14224
14225 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14226 {
14227         struct tg3 *tp = netdev_priv(dev);
14228         int err;
14229         bool reset_phy = false;
14230
14231         if (!netif_running(dev)) {
14232                 /* We'll just catch it later when the
14233                  * device is up'd.
14234                  */
14235                 tg3_set_mtu(dev, tp, new_mtu);
14236                 return 0;
14237         }
14238
14239         tg3_phy_stop(tp);
14240
14241         tg3_netif_stop(tp);
14242
14243         tg3_set_mtu(dev, tp, new_mtu);
14244
14245         tg3_full_lock(tp, 1);
14246
14247         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14248
14249         /* Reset PHY, otherwise the read DMA engine will be in a mode that
14250          * breaks all requests to 256 bytes.
14251          */
14252         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14253             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14254             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14255             tg3_asic_rev(tp) == ASIC_REV_5720)
14256                 reset_phy = true;
14257
14258         err = tg3_restart_hw(tp, reset_phy);
14259
14260         if (!err)
14261                 tg3_netif_start(tp);
14262
14263         tg3_full_unlock(tp);
14264
14265         if (!err)
14266                 tg3_phy_start(tp);
14267
14268         return err;
14269 }
14270
/* Standard net_device_ops table for tg3 devices; installed at probe
 * time.  Poll-controller support is compiled in only when netpoll is
 * enabled in the kernel configuration.
 */
static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_get_stats64        = tg3_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
        .ndo_fix_features       = tg3_fix_features,
        .ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};
14288
14289 static void tg3_get_eeprom_size(struct tg3 *tp)
14290 {
14291         u32 cursize, val, magic;
14292
14293         tp->nvram_size = EEPROM_CHIP_SIZE;
14294
14295         if (tg3_nvram_read(tp, 0, &magic) != 0)
14296                 return;
14297
14298         if ((magic != TG3_EEPROM_MAGIC) &&
14299             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14300             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14301                 return;
14302
14303         /*
14304          * Size the chip by reading offsets at increasing powers of two.
14305          * When we encounter our validation signature, we know the addressing
14306          * has wrapped around, and thus have our chip size.
14307          */
14308         cursize = 0x10;
14309
14310         while (cursize < tp->nvram_size) {
14311                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14312                         return;
14313
14314                 if (val == magic)
14315                         break;
14316
14317                 cursize <<= 1;
14318         }
14319
14320         tp->nvram_size = cursize;
14321 }
14322
14323 static void tg3_get_nvram_size(struct tg3 *tp)
14324 {
14325         u32 val;
14326
14327         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14328                 return;
14329
14330         /* Selfboot format */
14331         if (val != TG3_EEPROM_MAGIC) {
14332                 tg3_get_eeprom_size(tp);
14333                 return;
14334         }
14335
14336         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14337                 if (val != 0) {
14338                         /* This is confusing.  We want to operate on the
14339                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14340                          * call will read from NVRAM and byteswap the data
14341                          * according to the byteswapping settings for all
14342                          * other register accesses.  This ensures the data we
14343                          * want will always reside in the lower 16-bits.
14344                          * However, the data in NVRAM is in LE format, which
14345                          * means the data from the NVRAM read will always be
14346                          * opposite the endianness of the CPU.  The 16-bit
14347                          * byteswap then brings the data to CPU endianness.
14348                          */
14349                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14350                         return;
14351                 }
14352         }
14353         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14354 }
14355
/* Generic NVRAM detection for 5750/5780-class chips: decode the vendor
 * field of NVRAM_CFG1 into tp->nvram_jedecnum / tp->nvram_pagesize and
 * the NVRAM_BUFFERED flag.  Other chips fall back to buffered Atmel
 * defaults.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tg3_flag_set(tp, FLASH);
        } else {
                /* No flash interface: clear compat bypass so EEPROM
                 * accesses go through the normal path.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
            tg3_flag(tp, 5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_ATMEL_EEPROM:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ST:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_SAIFUN:
                        tp->nvram_jedecnum = JEDEC_SAIFUN;
                        tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_SST_SMALL:
                case FLASH_VENDOR_SST_LARGE:
                        tp->nvram_jedecnum = JEDEC_SST;
                        tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                        break;
                }
        } else {
                /* Non-5750/5780 chips: assume buffered Atmel flash. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tg3_flag_set(tp, NVRAM_BUFFERED);
        }
}
14406
14407 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14408 {
14409         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14410         case FLASH_5752PAGE_SIZE_256:
14411                 tp->nvram_pagesize = 256;
14412                 break;
14413         case FLASH_5752PAGE_SIZE_512:
14414                 tp->nvram_pagesize = 512;
14415                 break;
14416         case FLASH_5752PAGE_SIZE_1K:
14417                 tp->nvram_pagesize = 1024;
14418                 break;
14419         case FLASH_5752PAGE_SIZE_2K:
14420                 tp->nvram_pagesize = 2048;
14421                 break;
14422         case FLASH_5752PAGE_SIZE_4K:
14423                 tp->nvram_pagesize = 4096;
14424                 break;
14425         case FLASH_5752PAGE_SIZE_264:
14426                 tp->nvram_pagesize = 264;
14427                 break;
14428         case FLASH_5752PAGE_SIZE_528:
14429                 tp->nvram_pagesize = 528;
14430                 break;
14431         }
14432 }
14433
/* NVRAM detection for BCM5752: decode the vendor field of NVRAM_CFG1
 * into JEDEC vendor, buffering/flash flags and page size.
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tg3_flag_set(tp, PROTECTED_NVRAM);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        }

        if (tg3_flag(tp, FLASH)) {
                tg3_nvram_get_pagesize(tp, nvcfg1);
        } else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* Clear compat bypass so EEPROM accesses use the
                 * normal access path.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
14474
/* NVRAM detection for BCM5755: decode the vendor field and set vendor,
 * page size and total size.  When TPM protection (bit 27) is active,
 * the usable size is reduced to the protected limits below.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
        case FLASH_5755VENDOR_ATMEL_FLASH_5:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                        tp->nvram_size = (protect ? 0x3e200 :
                                          TG3_NVRAM_SIZE_512KB);
                else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_128KB);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_128KB);
                else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_128KB :
                                          TG3_NVRAM_SIZE_512KB);
                break;
        }
}
14530
/* NVRAM detection for BCM5787/5784/5785: decode the vendor field into
 * JEDEC vendor, flags and page size.  EEPROM parts additionally have
 * the compat-bypass bit cleared in NVRAM_CFG1.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }
}
14568
/* NVRAM detection for BCM5761.  When TPM protection is active, the
 * usable size comes from the NVRAM_ADDR_LOCKOUT register; otherwise it
 * is derived from the specific flash part identified in NVRAM_CFG1.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5761VENDOR_ATMEL_ADB021D:
        case FLASH_5761VENDOR_ATMEL_ADB041D:
        case FLASH_5761VENDOR_ATMEL_ADB081D:
        case FLASH_5761VENDOR_ATMEL_ADB161D:
        case FLASH_5761VENDOR_ATMEL_MDB021D:
        case FLASH_5761VENDOR_ATMEL_MDB041D:
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_A_M45PE40:
        case FLASH_5761VENDOR_ST_A_M45PE80:
        case FLASH_5761VENDOR_ST_A_M45PE16:
        case FLASH_5761VENDOR_ST_M_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE40:
        case FLASH_5761VENDOR_ST_M_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE16:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }

        if (protect) {
                /* Protected: the lockout register bounds the usable size. */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                }
        }
}
14643
/* NVRAM setup for BCM5906: fixed buffered Atmel EEPROM configuration,
 * no probing required.
 */
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
14650
/* NVRAM detection for BCM57780 and 57765-class chips: decode vendor
 * and part from NVRAM_CFG1 and set vendor, flags, size and page size.
 * Unknown vendors mark the device as having no NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                /* EEPROM: no flash page-size handling needed. */
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Inner switch picks the total size per exact part. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use the address-translation scheme. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14722
14723
/* NVRAM detection for BCM5717/5719: decode vendor and part from
 * NVRAM_CFG1 and set vendor, flags, size and page size.  Some parts
 * leave the size to be detected later (see the inline comments).
 * Unknown vendors mark the device as having no NVRAM.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                /* EEPROM: no flash page-size handling needed. */
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use the address-translation scheme. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14801
/* NVRAM detection for BCM5720 and BCM5762.  The 5762 has its own
 * Macronix parts (size auto-sensed via NVRAM_AUTOSENSE_STATUS) and
 * remaps some of its pinstraps onto 5720 equivalents before the main
 * switch.  Unknown pinstraps mark the device as having no NVRAM; on
 * the 5762 the presence of a valid magic word is verified at the end.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, nvmpinstrp, nv_status;

        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
                        tg3_flag_set(tp, NO_NVRAM);
                        return;
                }

                switch (nvmpinstrp) {
                case FLASH_5762_MX25L_100:
                case FLASH_5762_MX25L_200:
                case FLASH_5762_MX25L_400:
                case FLASH_5762_MX25L_800:
                case FLASH_5762_MX25L_160_320:
                        tp->nvram_pagesize = 4096;
                        tp->nvram_jedecnum = JEDEC_MACRONIX;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                        tg3_flag_set(tp, FLASH);
                        /* Size = 2^devid megabytes, where devid comes
                         * from the autosense status register.
                         */
                        nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
                        tp->nvram_size =
                                (1 << (nv_status >> AUTOSENSE_DEVID &
                                                AUTOSENSE_DEVID_MASK)
                                        << AUTOSENSE_SIZE_IN_MB);
                        return;

                case FLASH_5762_EEPROM_HD:
                        nvmpinstrp = FLASH_5720_EEPROM_HD;
                        break;
                case FLASH_5762_EEPROM_LD:
                        nvmpinstrp = FLASH_5720_EEPROM_LD;
                        break;
                case FLASH_5720VENDOR_M_ST_M45PE20:
                        /* This pinstrap supports multiple sizes, so force it
                         * to read the actual size from location 0xf0.
                         */
                        nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
                        break;
                }
        }

        switch (nvmpinstrp) {
        case FLASH_5720_EEPROM_HD:
        case FLASH_5720_EEPROM_LD:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                if (nvmpinstrp == FLASH_5720_EEPROM_HD)
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                else
                        tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
                /* EEPROM: no flash page-size handling needed. */
                return;
        case FLASH_5720VENDOR_M_ATMEL_DB011D:
        case FLASH_5720VENDOR_A_ATMEL_DB011B:
        case FLASH_5720VENDOR_A_ATMEL_DB011D:
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
        case FLASH_5720VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ATMEL_DB021D:
                case FLASH_5720VENDOR_A_ATMEL_DB021B:
                case FLASH_5720VENDOR_A_ATMEL_DB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB041D:
                case FLASH_5720VENDOR_A_ATMEL_DB041B:
                case FLASH_5720VENDOR_A_ATMEL_DB041D:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB081D:
                case FLASH_5720VENDOR_A_ATMEL_DB081D:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        /* On the 5762 the size is detected elsewhere. */
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5720VENDOR_M_ST_M25PE10:
        case FLASH_5720VENDOR_M_ST_M45PE10:
        case FLASH_5720VENDOR_A_ST_M25PE10:
        case FLASH_5720VENDOR_A_ST_M45PE10:
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
        case FLASH_5720VENDOR_ST_25USPT:
        case FLASH_5720VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ST_M25PE20:
                case FLASH_5720VENDOR_M_ST_M45PE20:
                case FLASH_5720VENDOR_A_ST_M25PE20:
                case FLASH_5720VENDOR_A_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE40:
                case FLASH_5720VENDOR_M_ST_M45PE40:
                case FLASH_5720VENDOR_A_ST_M25PE40:
                case FLASH_5720VENDOR_A_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE80:
                case FLASH_5720VENDOR_M_ST_M45PE80:
                case FLASH_5720VENDOR_A_ST_M25PE80:
                case FLASH_5720VENDOR_A_ST_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        /* On the 5762 the size is detected elsewhere. */
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use the address-translation scheme. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                u32 val;

                if (tg3_nvram_read(tp, 0, &val))
                        return;

                /* Sanity check: require a recognizable magic word. */
                if (val != TG3_EEPROM_MAGIC &&
                    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
                        tg3_flag_set(tp, NO_NVRAM);
        }
}
14965
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM state machine and program its clock period. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* The chip-specific probing below must run with the
		 * NVRAM arbitration lock held; if it cannot be taken,
		 * bail out without touching nvram_size.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		/* Dispatch to the per-ASIC NVRAM geometry prober. */
		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Prober left the size unknown; determine it by probing. */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701 have no NVRAM interface; use the serial
		 * EEPROM path instead.
		 */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
15040
/* One entry mapping a PCI subsystem vendor/device ID pair to the PHY
 * known to be on that board; used as a fallback when no PHY ID can be
 * read from the chip or from NVRAM (see tg3_lookup_by_subsys()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem IDs */
	u32 phy_id;	/* TG3_PHY_ID_*; 0 is treated as serdes by tg3_phy_probe() */
};
15045
/* Hardcoded board table consulted by tg3_lookup_by_subsys() when the
 * PHY ID cannot be obtained from the hardware or the EEPROM area.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
15109
15110 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15111 {
15112         int i;
15113
15114         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15115                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15116                      tp->pdev->subsystem_vendor) &&
15117                     (subsys_id_to_phy_id[i].subsys_devid ==
15118                      tp->pdev->subsystem_device))
15119                         return &subsys_id_to_phy_id[i];
15120         }
15121         return NULL;
15122 }
15123
/* Decode the board configuration (PHY id, LED mode, WOL/ASF/APE
 * capabilities, PHY flags) from the NIC's shared SRAM, which bootcode
 * populated from the EEPROM/NVRAM.  Falls back to the defaults set at
 * the top when no valid signature is present.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 keeps its config in the VCPU shadow register
		 * rather than in SRAM.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only read on newer ASICs with a bootcode
		 * version in (0, 0x100); otherwise cfg2 stays 0.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY id from the two SRAM halves into
		 * the driver's TG3_PHY_ID_* layout (same bit shuffle as
		 * the hardware read path in tg3_phy_probe()).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED overrides take precedence over the
		 * mode decoded above.
		 */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* NOTE(review): specific Arima boards appear to
			 * misreport write-protect; they are whitelisted
			 * here by subsystem ID.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards must explicitly advertise fiber WOL to
		 * keep the WOL capability assumed at the top.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	/* Propagate the final WOL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
15340
/* Read one 32-bit word from the APE's OTP region into *val.
 * @offset is scaled by 8 to form the OTP address (per hardware
 * addressing — confirm against APE OTP documentation).  Returns 0 on
 * success, a tg3_nvram_lock() error, or -EBUSY on command timeout.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	/* Serialize against other NVRAM/OTP users. */
	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	/* Read back the control register (likely to flush the posted
	 * write) before the settle delay.
	 */
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	/* Poll for completion for up to ~1 ms; val2 is assigned every
	 * iteration, so the check after the loop sees the last status.
	 */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Disable the OTP engine again regardless of outcome. */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
15373
/* Issue one OTP controller command and busy-wait for it to complete.
 * Returns 0 on success, -EBUSY if CMD_DONE never asserted.
 */
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	/* Pulse the START bit, then leave the bare command in place. */
	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	/* val is always assigned: the loop body runs at least once. */
	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
15392
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 on any OTP command failure (NOTE(review): a zero return is
 * indistinguishable from OTP data that is genuinely zero).
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register window. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Low half of the first word + high half of the second. */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
15422
15423 static void tg3_phy_init_link_config(struct tg3 *tp)
15424 {
15425         u32 adv = ADVERTISED_Autoneg;
15426
15427         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15428                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15429                         adv |= ADVERTISED_1000baseT_Half;
15430                 adv |= ADVERTISED_1000baseT_Full;
15431         }
15432
15433         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15434                 adv |= ADVERTISED_100baseT_Half |
15435                        ADVERTISED_100baseT_Full |
15436                        ADVERTISED_10baseT_Half |
15437                        ADVERTISED_10baseT_Full |
15438                        ADVERTISED_TP;
15439         else
15440                 adv |= ADVERTISED_FIBRE;
15441
15442         tp->link_config.advertising = adv;
15443         tp->link_config.speed = SPEED_UNKNOWN;
15444         tp->link_config.duplex = DUPLEX_UNKNOWN;
15445         tp->link_config.autoneg = AUTONEG_ENABLE;
15446         tp->link_config.active_speed = SPEED_UNKNOWN;
15447         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15448
15449         tp->old_link = -1;
15450 }
15451
/* Identify the PHY attached to the chip and set up the initial link
 * configuration.  The PHY ID is taken, in order of preference, from:
 * the hard PHY ID registers (skipped when ASF/APE firmware owns the
 * PHY), the ID cached from SRAM by tg3_get_eeprom_hw_cfg(), and
 * finally the static subsystem-ID board table.  Returns 0 or a
 * negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function arbitrates its PHY with the APE through its
	 * own lock.
	 */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Fold the MII ID registers into the driver's
		 * TG3_PHY_ID_* layout (mirrors tg3_get_eeprom_hw_cfg()).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Enable EEE on copper PHYs of the chip generations known to
	 * support it (excluding the early A0 steppings listed).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): intentionally invoked a second time in
		 * the original driver — do not "deduplicate".
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
15606
/* Extract the board part number (and, on Dell boards, a firmware
 * version string) from the PCI VPD block.  If no usable VPD is found,
 * fall back to a name derived from the PCI device ID, or "none".
 * Always leaves tp->board_part_number populated.
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	/* tg3_vpd_readblock() allocates; freed at out_not_found. */
	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD section and bound-check it. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* MFR_ID "1028" marks a Dell board (presumably matching PCI
	 * vendor 0x1028); only then is VENDOR0 read as a bootcode
	 * version into fw_ver.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Clamp so "%.*s" plus " bc " still fits in fw_ver. */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No part number from VPD; synthesize one from the device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
15730
15731 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15732 {
15733         u32 val;
15734
15735         if (tg3_nvram_read(tp, offset, &val) ||
15736             (val & 0xfc000000) != 0x0c000000 ||
15737             tg3_nvram_read(tp, offset + 4, &val) ||
15738             val != 0)
15739                 return 0;
15740
15741         return 1;
15742 }
15743
/* Read the bootcode (BC) firmware version from NVRAM and append it to
 * tp->fw_ver.  Two header layouts exist: "newver" images embed a
 * 16-byte ASCII version string located via a pointer at offset + 8,
 * while older images encode major/minor in the TG3_NVM_PTREV_BCVER
 * word.  Returns silently on any NVRAM read failure.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
        u32 val, offset, start, ver_offset;
        int i, dst_off;
        bool newver = false;

        /* NOTE(review): 0xc appears to hold the bootcode image pointer
         * and 0x4 the NVRAM image start address — confirm against the
         * NVRAM layout definitions in tg3.h.
         */
        if (tg3_nvram_read(tp, 0xc, &offset) ||
            tg3_nvram_read(tp, 0x4, &start))
                return;

        offset = tg3_nvram_logical_addr(tp, offset);

        if (tg3_nvram_read(tp, offset, &val))
                return;

        /* A 0x0c000000 signature with a zero second word marks the
         * newer header format carrying an embedded version string.
         */
        if ((val & 0xfc000000) == 0x0c000000) {
                if (tg3_nvram_read(tp, offset + 4, &val))
                        return;

                if (val == 0)
                        newver = true;
        }

        /* Append after whatever (e.g. VPD) version text is present. */
        dst_off = strlen(tp->fw_ver);

        if (newver) {
                /* Need room for the full 16-byte version blob. */
                if (TG3_VER_SIZE - dst_off < 16 ||
                    tg3_nvram_read(tp, offset + 8, &ver_offset))
                        return;

                offset = offset + ver_offset - start;
                for (i = 0; i < 16; i += 4) {
                        __be32 v;
                        if (tg3_nvram_read_be32(tp, offset + i, &v))
                                return;

                        memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
                }
        } else {
                u32 major, minor;

                if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
                        return;

                major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
                        TG3_NVM_BCVER_MAJSFT;
                minor = ver_offset & TG3_NVM_BCVER_MINMSK;
                snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
                         "v%d.%02d", major, minor);
        }
}
15795
15796 static void tg3_read_hwsb_ver(struct tg3 *tp)
15797 {
15798         u32 val, major, minor;
15799
15800         /* Use native endian representation */
15801         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15802                 return;
15803
15804         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15805                 TG3_NVM_HWSB_CFG1_MAJSFT;
15806         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15807                 TG3_NVM_HWSB_CFG1_MINSFT;
15808
15809         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15810 }
15811
15812 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15813 {
15814         u32 offset, major, minor, build;
15815
15816         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15817
15818         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15819                 return;
15820
15821         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15822         case TG3_EEPROM_SB_REVISION_0:
15823                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15824                 break;
15825         case TG3_EEPROM_SB_REVISION_2:
15826                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15827                 break;
15828         case TG3_EEPROM_SB_REVISION_3:
15829                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15830                 break;
15831         case TG3_EEPROM_SB_REVISION_4:
15832                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15833                 break;
15834         case TG3_EEPROM_SB_REVISION_5:
15835                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15836                 break;
15837         case TG3_EEPROM_SB_REVISION_6:
15838                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15839                 break;
15840         default:
15841                 return;
15842         }
15843
15844         if (tg3_nvram_read(tp, offset, &val))
15845                 return;
15846
15847         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15848                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15849         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15850                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15851         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15852
15853         if (minor > 99 || build > 26)
15854                 return;
15855
15856         offset = strlen(tp->fw_ver);
15857         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15858                  " v%d.%02d", major, minor);
15859
15860         if (build > 0) {
15861                 offset = strlen(tp->fw_ver);
15862                 if (offset < TG3_VER_SIZE - 1)
15863                         tp->fw_ver[offset] = 'a' + build - 1;
15864         }
15865 }
15866
15867 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15868 {
15869         u32 val, offset, start;
15870         int i, vlen;
15871
15872         for (offset = TG3_NVM_DIR_START;
15873              offset < TG3_NVM_DIR_END;
15874              offset += TG3_NVM_DIRENT_SIZE) {
15875                 if (tg3_nvram_read(tp, offset, &val))
15876                         return;
15877
15878                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15879                         break;
15880         }
15881
15882         if (offset == TG3_NVM_DIR_END)
15883                 return;
15884
15885         if (!tg3_flag(tp, 5705_PLUS))
15886                 start = 0x08000000;
15887         else if (tg3_nvram_read(tp, offset - 4, &start))
15888                 return;
15889
15890         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15891             !tg3_fw_img_is_valid(tp, offset) ||
15892             tg3_nvram_read(tp, offset + 8, &val))
15893                 return;
15894
15895         offset += val - start;
15896
15897         vlen = strlen(tp->fw_ver);
15898
15899         tp->fw_ver[vlen++] = ',';
15900         tp->fw_ver[vlen++] = ' ';
15901
15902         for (i = 0; i < 4; i++) {
15903                 __be32 v;
15904                 if (tg3_nvram_read_be32(tp, offset, &v))
15905                         return;
15906
15907                 offset += sizeof(v);
15908
15909                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15910                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15911                         break;
15912                 }
15913
15914                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15915                 vlen += sizeof(v);
15916         }
15917 }
15918
15919 static void tg3_probe_ncsi(struct tg3 *tp)
15920 {
15921         u32 apedata;
15922
15923         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15924         if (apedata != APE_SEG_SIG_MAGIC)
15925                 return;
15926
15927         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15928         if (!(apedata & APE_FW_STATUS_READY))
15929                 return;
15930
15931         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15932                 tg3_flag_set(tp, APE_HAS_NCSI);
15933 }
15934
15935 static void tg3_read_dash_ver(struct tg3 *tp)
15936 {
15937         int vlen;
15938         u32 apedata;
15939         char *fwtype;
15940
15941         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15942
15943         if (tg3_flag(tp, APE_HAS_NCSI))
15944                 fwtype = "NCSI";
15945         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15946                 fwtype = "SMASH";
15947         else
15948                 fwtype = "DASH";
15949
15950         vlen = strlen(tp->fw_ver);
15951
15952         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15953                  fwtype,
15954                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15955                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15956                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15957                  (apedata & APE_FW_VERSION_BLDMSK));
15958 }
15959
15960 static void tg3_read_otp_ver(struct tg3 *tp)
15961 {
15962         u32 val, val2;
15963
15964         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15965                 return;
15966
15967         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15968             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15969             TG3_OTP_MAGIC0_VALID(val)) {
15970                 u64 val64 = (u64) val << 32 | val2;
15971                 u32 ver = 0;
15972                 int i, vlen;
15973
15974                 for (i = 0; i < 7; i++) {
15975                         if ((val64 & 0xff) == 0)
15976                                 break;
15977                         ver = val64 & 0xff;
15978                         val64 >>= 8;
15979                 }
15980                 vlen = strlen(tp->fw_ver);
15981                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15982         }
15983 }
15984
15985 static void tg3_read_fw_ver(struct tg3 *tp)
15986 {
15987         u32 val;
15988         bool vpd_vers = false;
15989
15990         if (tp->fw_ver[0] != 0)
15991                 vpd_vers = true;
15992
15993         if (tg3_flag(tp, NO_NVRAM)) {
15994                 strcat(tp->fw_ver, "sb");
15995                 tg3_read_otp_ver(tp);
15996                 return;
15997         }
15998
15999         if (tg3_nvram_read(tp, 0, &val))
16000                 return;
16001
16002         if (val == TG3_EEPROM_MAGIC)
16003                 tg3_read_bc_ver(tp);
16004         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16005                 tg3_read_sb_ver(tp, val);
16006         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16007                 tg3_read_hwsb_ver(tp);
16008
16009         if (tg3_flag(tp, ENABLE_ASF)) {
16010                 if (tg3_flag(tp, ENABLE_APE)) {
16011                         tg3_probe_ncsi(tp);
16012                         if (!vpd_vers)
16013                                 tg3_read_dash_ver(tp);
16014                 } else if (!vpd_vers) {
16015                         tg3_read_mgmtfw_ver(tp);
16016                 }
16017         }
16018
16019         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16020 }
16021
16022 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16023 {
16024         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16025                 return TG3_RX_RET_MAX_SIZE_5717;
16026         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16027                 return TG3_RX_RET_MAX_SIZE_5700;
16028         else
16029                 return TG3_RX_RET_MAX_SIZE_5705;
16030 }
16031
/* PCI host bridge chipsets known to reorder posted memory writes.
 * NOTE(review): presumably matched against the system's bridges to
 * enable a write-reorder workaround — confirm against the consumer
 * of this table (outside this chunk).
 */
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
        { },    /* sentinel */
};
16038
/* Locate the other PCI function of a dual-port device sharing our
 * slot.  Returns tp->pdev itself when no distinct peer function
 * exists.  The returned pointer carries no extra reference (see the
 * comment before the final pci_dev_put below).
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

        /* Scan all eight functions in this slot for a device other
         * than ourselves.  pci_get_slot() takes a reference, which is
         * dropped immediately for non-matches.
         *
         * NOTE(review): if the loop exhausts all functions with the
         * last pci_get_slot() returning tp->pdev, peer is non-NULL but
         * its reference was already dropped inside the loop, and the
         * pci_dev_put() below drops one more — presumably unreachable
         * in practice; confirm.
         */
        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                pci_dev_put(peer);
        }
        /* 5704 can be configured in single-port mode, set peer to
         * tp->pdev in that case.
         */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other
         */
        pci_dev_put(peer);

        return peer;
}
16066
/* Decode the chip revision ID from the MISC_HOST_CTRL value (or, for
 * newer parts that report ASIC_REV_USE_PROD_ID_REG, from a product-ID
 * config register) and derive the broad chip-family capability flags
 * (5705_PLUS, 5750_PLUS, 57765_CLASS, ...) that the rest of the
 * driver keys off.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
        tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
        if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
                u32 reg;

                /* All devices that use the alternate
                 * ASIC REV location have a CPMU.
                 */
                tg3_flag_set(tp, CPMU_PRESENT);

                /* Pick the config-space register that holds the real
                 * ASIC revision for this device ID.
                 */
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
                        reg = TG3PCI_GEN2_PRODID_ASICREV;
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        reg = TG3PCI_GEN15_PRODID_ASICREV;
                else
                        reg = TG3PCI_PRODID_ASICREV;

                pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
        }

        /* Wrong chip ID in 5752 A0. This code can be removed later
         * as A0 is not in production.
         */
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
                tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

        /* 5717 C0 reports itself with the 5720 A0 ID. */
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
                tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

        /* Family flags below build on each other: 5717_PLUS and
         * 57765_CLASS feed 57765_PLUS, which feeds 5755_PLUS, and so
         * on down to 5705_PLUS.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720)
                tg3_flag_set(tp, 5717_PLUS);

        if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
            tg3_asic_rev(tp) == ASIC_REV_57766)
                tg3_flag_set(tp, 57765_CLASS);

        if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
             tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, 57765_PLUS);

        /* Intentionally exclude ASIC_REV_5906 */
        if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
            tg3_asic_rev(tp) == ASIC_REV_5787 ||
            tg3_asic_rev(tp) == ASIC_REV_5784 ||
            tg3_asic_rev(tp) == ASIC_REV_5761 ||
            tg3_asic_rev(tp) == ASIC_REV_5785 ||
            tg3_asic_rev(tp) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_PLUS))
                tg3_flag_set(tp, 5755_PLUS);

        if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
            tg3_asic_rev(tp) == ASIC_REV_5714)
                tg3_flag_set(tp, 5780_CLASS);

        if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
            tg3_asic_rev(tp) == ASIC_REV_5752 ||
            tg3_asic_rev(tp) == ASIC_REV_5906 ||
            tg3_flag(tp, 5755_PLUS) ||
            tg3_flag(tp, 5780_CLASS))
                tg3_flag_set(tp, 5750_PLUS);

        if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
            tg3_flag(tp, 5750_PLUS))
                tg3_flag_set(tp, 5705_PLUS);
}
16154
16155 static bool tg3_10_100_only_device(struct tg3 *tp,
16156                                    const struct pci_device_id *ent)
16157 {
16158         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16159
16160         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16161              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16162             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16163                 return true;
16164
16165         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16166                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16167                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16168                                 return true;
16169                 } else {
16170                         return true;
16171                 }
16172         }
16173
16174         return false;
16175 }
16176
16177 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16178 {
16179         u32 misc_ctrl_reg;
16180         u32 pci_state_reg, grc_misc_cfg;
16181         u32 val;
16182         u16 pci_cmd;
16183         int err;
16184
16185         /* Force memory write invalidate off.  If we leave it on,
16186          * then on 5700_BX chips we have to enable a workaround.
16187          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16188          * to match the cacheline size.  The Broadcom driver have this
16189          * workaround but turns MWI off all the times so never uses
16190          * it.  This seems to suggest that the workaround is insufficient.
16191          */
16192         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16193         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16194         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16195
16196         /* Important! -- Make sure register accesses are byteswapped
16197          * correctly.  Also, for those chips that require it, make
16198          * sure that indirect register accesses are enabled before
16199          * the first operation.
16200          */
16201         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16202                               &misc_ctrl_reg);
16203         tp->misc_host_ctrl |= (misc_ctrl_reg &
16204                                MISC_HOST_CTRL_CHIPREV);
16205         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16206                                tp->misc_host_ctrl);
16207
16208         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16209
16210         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16211          * we need to disable memory and use config. cycles
16212          * only to access all registers. The 5702/03 chips
16213          * can mistakenly decode the special cycles from the
16214          * ICH chipsets as memory write cycles, causing corruption
16215          * of register and memory space. Only certain ICH bridges
16216          * will drive special cycles with non-zero data during the
16217          * address phase which can fall within the 5703's address
16218          * range. This is not an ICH bug as the PCI spec allows
16219          * non-zero address during special cycles. However, only
16220          * these ICH bridges are known to drive non-zero addresses
16221          * during special cycles.
16222          *
16223          * Since special cycles do not cross PCI bridges, we only
16224          * enable this workaround if the 5703 is on the secondary
16225          * bus of these ICH bridges.
16226          */
16227         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16228             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16229                 static struct tg3_dev_id {
16230                         u32     vendor;
16231                         u32     device;
16232                         u32     rev;
16233                 } ich_chipsets[] = {
16234                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16235                           PCI_ANY_ID },
16236                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16237                           PCI_ANY_ID },
16238                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16239                           0xa },
16240                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16241                           PCI_ANY_ID },
16242                         { },
16243                 };
16244                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16245                 struct pci_dev *bridge = NULL;
16246
16247                 while (pci_id->vendor != 0) {
16248                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16249                                                 bridge);
16250                         if (!bridge) {
16251                                 pci_id++;
16252                                 continue;
16253                         }
16254                         if (pci_id->rev != PCI_ANY_ID) {
16255                                 if (bridge->revision > pci_id->rev)
16256                                         continue;
16257                         }
16258                         if (bridge->subordinate &&
16259                             (bridge->subordinate->number ==
16260                              tp->pdev->bus->number)) {
16261                                 tg3_flag_set(tp, ICH_WORKAROUND);
16262                                 pci_dev_put(bridge);
16263                                 break;
16264                         }
16265                 }
16266         }
16267
16268         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16269                 static struct tg3_dev_id {
16270                         u32     vendor;
16271                         u32     device;
16272                 } bridge_chipsets[] = {
16273                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16274                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16275                         { },
16276                 };
16277                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16278                 struct pci_dev *bridge = NULL;
16279
16280                 while (pci_id->vendor != 0) {
16281                         bridge = pci_get_device(pci_id->vendor,
16282                                                 pci_id->device,
16283                                                 bridge);
16284                         if (!bridge) {
16285                                 pci_id++;
16286                                 continue;
16287                         }
16288                         if (bridge->subordinate &&
16289                             (bridge->subordinate->number <=
16290                              tp->pdev->bus->number) &&
16291                             (bridge->subordinate->busn_res.end >=
16292                              tp->pdev->bus->number)) {
16293                                 tg3_flag_set(tp, 5701_DMA_BUG);
16294                                 pci_dev_put(bridge);
16295                                 break;
16296                         }
16297                 }
16298         }
16299
16300         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16301          * DMA addresses > 40-bit. This bridge may have other additional
16302          * 57xx devices behind it in some 4-port NIC designs for example.
16303          * Any tg3 device found behind the bridge will also need the 40-bit
16304          * DMA workaround.
16305          */
16306         if (tg3_flag(tp, 5780_CLASS)) {
16307                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16308                 tp->msi_cap = tp->pdev->msi_cap;
16309         } else {
16310                 struct pci_dev *bridge = NULL;
16311
16312                 do {
16313                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16314                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16315                                                 bridge);
16316                         if (bridge && bridge->subordinate &&
16317                             (bridge->subordinate->number <=
16318                              tp->pdev->bus->number) &&
16319                             (bridge->subordinate->busn_res.end >=
16320                              tp->pdev->bus->number)) {
16321                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16322                                 pci_dev_put(bridge);
16323                                 break;
16324                         }
16325                 } while (bridge);
16326         }
16327
16328         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16329             tg3_asic_rev(tp) == ASIC_REV_5714)
16330                 tp->pdev_peer = tg3_find_peer(tp);
16331
16332         /* Determine TSO capabilities */
16333         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16334                 ; /* Do nothing. HW bug. */
16335         else if (tg3_flag(tp, 57765_PLUS))
16336                 tg3_flag_set(tp, HW_TSO_3);
16337         else if (tg3_flag(tp, 5755_PLUS) ||
16338                  tg3_asic_rev(tp) == ASIC_REV_5906)
16339                 tg3_flag_set(tp, HW_TSO_2);
16340         else if (tg3_flag(tp, 5750_PLUS)) {
16341                 tg3_flag_set(tp, HW_TSO_1);
16342                 tg3_flag_set(tp, TSO_BUG);
16343                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16344                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16345                         tg3_flag_clear(tp, TSO_BUG);
16346         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16347                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16348                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16349                 tg3_flag_set(tp, FW_TSO);
16350                 tg3_flag_set(tp, TSO_BUG);
16351                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16352                         tp->fw_needed = FIRMWARE_TG3TSO5;
16353                 else
16354                         tp->fw_needed = FIRMWARE_TG3TSO;
16355         }
16356
16357         /* Selectively allow TSO based on operating conditions */
16358         if (tg3_flag(tp, HW_TSO_1) ||
16359             tg3_flag(tp, HW_TSO_2) ||
16360             tg3_flag(tp, HW_TSO_3) ||
16361             tg3_flag(tp, FW_TSO)) {
16362                 /* For firmware TSO, assume ASF is disabled.
16363                  * We'll disable TSO later if we discover ASF
16364                  * is enabled in tg3_get_eeprom_hw_cfg().
16365                  */
16366                 tg3_flag_set(tp, TSO_CAPABLE);
16367         } else {
16368                 tg3_flag_clear(tp, TSO_CAPABLE);
16369                 tg3_flag_clear(tp, TSO_BUG);
16370                 tp->fw_needed = NULL;
16371         }
16372
16373         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16374                 tp->fw_needed = FIRMWARE_TG3;
16375
16376         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16377                 tp->fw_needed = FIRMWARE_TG357766;
16378
16379         tp->irq_max = 1;
16380
16381         if (tg3_flag(tp, 5750_PLUS)) {
16382                 tg3_flag_set(tp, SUPPORT_MSI);
16383                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16384                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16385                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16386                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16387                      tp->pdev_peer == tp->pdev))
16388                         tg3_flag_clear(tp, SUPPORT_MSI);
16389
16390                 if (tg3_flag(tp, 5755_PLUS) ||
16391                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16392                         tg3_flag_set(tp, 1SHOT_MSI);
16393                 }
16394
16395                 if (tg3_flag(tp, 57765_PLUS)) {
16396                         tg3_flag_set(tp, SUPPORT_MSIX);
16397                         tp->irq_max = TG3_IRQ_MAX_VECS;
16398                 }
16399         }
16400
16401         tp->txq_max = 1;
16402         tp->rxq_max = 1;
16403         if (tp->irq_max > 1) {
16404                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16405                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16406
16407                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16408                     tg3_asic_rev(tp) == ASIC_REV_5720)
16409                         tp->txq_max = tp->irq_max - 1;
16410         }
16411
16412         if (tg3_flag(tp, 5755_PLUS) ||
16413             tg3_asic_rev(tp) == ASIC_REV_5906)
16414                 tg3_flag_set(tp, SHORT_DMA_BUG);
16415
16416         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16417                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16418
16419         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16420             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16421             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16422             tg3_asic_rev(tp) == ASIC_REV_5762)
16423                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16424
16425         if (tg3_flag(tp, 57765_PLUS) &&
16426             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16427                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16428
16429         if (!tg3_flag(tp, 5705_PLUS) ||
16430             tg3_flag(tp, 5780_CLASS) ||
16431             tg3_flag(tp, USE_JUMBO_BDFLAG))
16432                 tg3_flag_set(tp, JUMBO_CAPABLE);
16433
16434         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16435                               &pci_state_reg);
16436
16437         if (pci_is_pcie(tp->pdev)) {
16438                 u16 lnkctl;
16439
16440                 tg3_flag_set(tp, PCI_EXPRESS);
16441
16442                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16443                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16444                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16445                                 tg3_flag_clear(tp, HW_TSO_2);
16446                                 tg3_flag_clear(tp, TSO_CAPABLE);
16447                         }
16448                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16449                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16450                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16451                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16452                                 tg3_flag_set(tp, CLKREQ_BUG);
16453                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16454                         tg3_flag_set(tp, L1PLLPD_EN);
16455                 }
16456         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16457                 /* BCM5785 devices are effectively PCIe devices, and should
16458                  * follow PCIe codepaths, but do not have a PCIe capabilities
16459                  * section.
16460                  */
16461                 tg3_flag_set(tp, PCI_EXPRESS);
16462         } else if (!tg3_flag(tp, 5705_PLUS) ||
16463                    tg3_flag(tp, 5780_CLASS)) {
16464                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16465                 if (!tp->pcix_cap) {
16466                         dev_err(&tp->pdev->dev,
16467                                 "Cannot find PCI-X capability, aborting\n");
16468                         return -EIO;
16469                 }
16470
16471                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16472                         tg3_flag_set(tp, PCIX_MODE);
16473         }
16474
16475         /* If we have an AMD 762 or VIA K8T800 chipset, write
16476          * reordering to the mailbox registers done by the host
16477          * controller can cause major troubles.  We read back from
16478          * every mailbox register write to force the writes to be
16479          * posted to the chip in order.
16480          */
16481         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16482             !tg3_flag(tp, PCI_EXPRESS))
16483                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16484
16485         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16486                              &tp->pci_cacheline_sz);
16487         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16488                              &tp->pci_lat_timer);
16489         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16490             tp->pci_lat_timer < 64) {
16491                 tp->pci_lat_timer = 64;
16492                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16493                                       tp->pci_lat_timer);
16494         }
16495
16496         /* Important! -- It is critical that the PCI-X hw workaround
16497          * situation is decided before the first MMIO register access.
16498          */
16499         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16500                 /* 5700 BX chips need to have their TX producer index
16501                  * mailboxes written twice to workaround a bug.
16502                  */
16503                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16504
16505                 /* If we are in PCI-X mode, enable register write workaround.
16506                  *
16507                  * The workaround is to use indirect register accesses
16508                  * for all chip writes not to mailbox registers.
16509                  */
16510                 if (tg3_flag(tp, PCIX_MODE)) {
16511                         u32 pm_reg;
16512
16513                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16514
16515                         /* The chip can have it's power management PCI config
16516                          * space registers clobbered due to this bug.
16517                          * So explicitly force the chip into D0 here.
16518                          */
16519                         pci_read_config_dword(tp->pdev,
16520                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16521                                               &pm_reg);
16522                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16523                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16524                         pci_write_config_dword(tp->pdev,
16525                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16526                                                pm_reg);
16527
16528                         /* Also, force SERR#/PERR# in PCI command. */
16529                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16530                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16531                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16532                 }
16533         }
16534
16535         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16536                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16537         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16538                 tg3_flag_set(tp, PCI_32BIT);
16539
16540         /* Chip-specific fixup from Broadcom driver */
16541         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16542             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16543                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16544                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16545         }
16546
16547         /* Default fast path register access methods */
16548         tp->read32 = tg3_read32;
16549         tp->write32 = tg3_write32;
16550         tp->read32_mbox = tg3_read32;
16551         tp->write32_mbox = tg3_write32;
16552         tp->write32_tx_mbox = tg3_write32;
16553         tp->write32_rx_mbox = tg3_write32;
16554
16555         /* Various workaround register access methods */
16556         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16557                 tp->write32 = tg3_write_indirect_reg32;
16558         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16559                  (tg3_flag(tp, PCI_EXPRESS) &&
16560                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16561                 /*
16562                  * Back to back register writes can cause problems on these
16563                  * chips, the workaround is to read back all reg writes
16564                  * except those to mailbox regs.
16565                  *
16566                  * See tg3_write_indirect_reg32().
16567                  */
16568                 tp->write32 = tg3_write_flush_reg32;
16569         }
16570
16571         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16572                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16573                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16574                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16575         }
16576
16577         if (tg3_flag(tp, ICH_WORKAROUND)) {
16578                 tp->read32 = tg3_read_indirect_reg32;
16579                 tp->write32 = tg3_write_indirect_reg32;
16580                 tp->read32_mbox = tg3_read_indirect_mbox;
16581                 tp->write32_mbox = tg3_write_indirect_mbox;
16582                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16583                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16584
16585                 iounmap(tp->regs);
16586                 tp->regs = NULL;
16587
16588                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16589                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16590                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16591         }
16592         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16593                 tp->read32_mbox = tg3_read32_mbox_5906;
16594                 tp->write32_mbox = tg3_write32_mbox_5906;
16595                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16596                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16597         }
16598
16599         if (tp->write32 == tg3_write_indirect_reg32 ||
16600             (tg3_flag(tp, PCIX_MODE) &&
16601              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16602               tg3_asic_rev(tp) == ASIC_REV_5701)))
16603                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16604
16605         /* The memory arbiter has to be enabled in order for SRAM accesses
16606          * to succeed.  Normally on powerup the tg3 chip firmware will make
16607          * sure it is enabled, but other entities such as system netboot
16608          * code might disable it.
16609          */
16610         val = tr32(MEMARB_MODE);
16611         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16612
16613         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16614         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16615             tg3_flag(tp, 5780_CLASS)) {
16616                 if (tg3_flag(tp, PCIX_MODE)) {
16617                         pci_read_config_dword(tp->pdev,
16618                                               tp->pcix_cap + PCI_X_STATUS,
16619                                               &val);
16620                         tp->pci_fn = val & 0x7;
16621                 }
16622         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16623                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16624                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16625                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16626                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16627                         val = tr32(TG3_CPMU_STATUS);
16628
16629                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16630                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16631                 else
16632                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16633                                      TG3_CPMU_STATUS_FSHFT_5719;
16634         }
16635
16636         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16637                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16638                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16639         }
16640
16641         /* Get eeprom hw config before calling tg3_set_power_state().
16642          * In particular, the TG3_FLAG_IS_NIC flag must be
16643          * determined before calling tg3_set_power_state() so that
16644          * we know whether or not to switch out of Vaux power.
16645          * When the flag is set, it means that GPIO1 is used for eeprom
16646          * write protect and also implies that it is a LOM where GPIOs
16647          * are not used to switch power.
16648          */
16649         tg3_get_eeprom_hw_cfg(tp);
16650
16651         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16652                 tg3_flag_clear(tp, TSO_CAPABLE);
16653                 tg3_flag_clear(tp, TSO_BUG);
16654                 tp->fw_needed = NULL;
16655         }
16656
16657         if (tg3_flag(tp, ENABLE_APE)) {
16658                 /* Allow reads and writes to the
16659                  * APE register and memory space.
16660                  */
16661                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16662                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16663                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16664                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16665                                        pci_state_reg);
16666
16667                 tg3_ape_lock_init(tp);
16668                 tp->ape_hb_interval =
16669                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16670         }
16671
16672         /* Set up tp->grc_local_ctrl before calling
16673          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16674          * will bring 5700's external PHY out of reset.
16675          * It is also used as eeprom write protect on LOMs.
16676          */
16677         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16678         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16679             tg3_flag(tp, EEPROM_WRITE_PROT))
16680                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16681                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16682         /* Unused GPIO3 must be driven as output on 5752 because there
16683          * are no pull-up resistors on unused GPIO pins.
16684          */
16685         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16686                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16687
16688         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16689             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16690             tg3_flag(tp, 57765_CLASS))
16691                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16692
16693         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16694             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16695                 /* Turn off the debug UART. */
16696                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16697                 if (tg3_flag(tp, IS_NIC))
16698                         /* Keep VMain power. */
16699                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16700                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16701         }
16702
16703         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16704                 tp->grc_local_ctrl |=
16705                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16706
16707         /* Switch out of Vaux if it is a NIC */
16708         tg3_pwrsrc_switch_to_vmain(tp);
16709
16710         /* Derive initial jumbo mode from MTU assigned in
16711          * ether_setup() via the alloc_etherdev() call
16712          */
16713         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16714                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16715
16716         /* Determine WakeOnLan speed to use. */
16717         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16718             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16719             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16720             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16721                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16722         } else {
16723                 tg3_flag_set(tp, WOL_SPEED_100MB);
16724         }
16725
16726         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16727                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16728
16729         /* A few boards don't want Ethernet@WireSpeed phy feature */
16730         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16731             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16732              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16733              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16734             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16735             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16736                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16737
16738         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16739             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16740                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16741         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16742                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16743
16744         if (tg3_flag(tp, 5705_PLUS) &&
16745             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16746             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16747             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16748             !tg3_flag(tp, 57765_PLUS)) {
16749                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16750                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16751                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16752                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16753                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16754                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16755                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16756                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16757                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16758                 } else
16759                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16760         }
16761
16762         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16763             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16764                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16765                 if (tp->phy_otp == 0)
16766                         tp->phy_otp = TG3_OTP_DEFAULT;
16767         }
16768
16769         if (tg3_flag(tp, CPMU_PRESENT))
16770                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16771         else
16772                 tp->mi_mode = MAC_MI_MODE_BASE;
16773
16774         tp->coalesce_mode = 0;
16775         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16776             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16777                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16778
16779         /* Set these bits to enable statistics workaround. */
16780         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16781             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16782             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16783             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16784                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16785                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16786         }
16787
16788         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16789             tg3_asic_rev(tp) == ASIC_REV_57780)
16790                 tg3_flag_set(tp, USE_PHYLIB);
16791
16792         err = tg3_mdio_init(tp);
16793         if (err)
16794                 return err;
16795
16796         /* Initialize data/descriptor byte/word swapping. */
16797         val = tr32(GRC_MODE);
16798         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16799             tg3_asic_rev(tp) == ASIC_REV_5762)
16800                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16801                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16802                         GRC_MODE_B2HRX_ENABLE |
16803                         GRC_MODE_HTX2B_ENABLE |
16804                         GRC_MODE_HOST_STACKUP);
16805         else
16806                 val &= GRC_MODE_HOST_STACKUP;
16807
16808         tw32(GRC_MODE, val | tp->grc_mode);
16809
16810         tg3_switch_clocks(tp);
16811
16812         /* Clear this out for sanity. */
16813         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16814
16815         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16816         tw32(TG3PCI_REG_BASE_ADDR, 0);
16817
16818         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16819                               &pci_state_reg);
16820         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16821             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16822                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16823                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16824                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16825                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16826                         void __iomem *sram_base;
16827
16828                         /* Write some dummy words into the SRAM status block
16829                          * area, see if it reads back correctly.  If the return
16830                          * value is bad, force enable the PCIX workaround.
16831                          */
16832                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16833
16834                         writel(0x00000000, sram_base);
16835                         writel(0x00000000, sram_base + 4);
16836                         writel(0xffffffff, sram_base + 4);
16837                         if (readl(sram_base) != 0x00000000)
16838                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16839                 }
16840         }
16841
16842         udelay(50);
16843         tg3_nvram_init(tp);
16844
16845         /* If the device has an NVRAM, no need to load patch firmware */
16846         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16847             !tg3_flag(tp, NO_NVRAM))
16848                 tp->fw_needed = NULL;
16849
16850         grc_misc_cfg = tr32(GRC_MISC_CFG);
16851         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16852
16853         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16854             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16855              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16856                 tg3_flag_set(tp, IS_5788);
16857
16858         if (!tg3_flag(tp, IS_5788) &&
16859             tg3_asic_rev(tp) != ASIC_REV_5700)
16860                 tg3_flag_set(tp, TAGGED_STATUS);
16861         if (tg3_flag(tp, TAGGED_STATUS)) {
16862                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16863                                       HOSTCC_MODE_CLRTICK_TXBD);
16864
16865                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16866                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16867                                        tp->misc_host_ctrl);
16868         }
16869
16870         /* Preserve the APE MAC_MODE bits */
16871         if (tg3_flag(tp, ENABLE_APE))
16872                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16873         else
16874                 tp->mac_mode = 0;
16875
16876         if (tg3_10_100_only_device(tp, ent))
16877                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16878
16879         err = tg3_phy_probe(tp);
16880         if (err) {
16881                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16882                 /* ... but do not return immediately ... */
16883                 tg3_mdio_fini(tp);
16884         }
16885
16886         tg3_read_vpd(tp);
16887         tg3_read_fw_ver(tp);
16888
16889         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16890                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16891         } else {
16892                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16893                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16894                 else
16895                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16896         }
16897
16898         /* 5700 {AX,BX} chips have a broken status block link
16899          * change bit implementation, so we must use the
16900          * status register in those cases.
16901          */
16902         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16903                 tg3_flag_set(tp, USE_LINKCHG_REG);
16904         else
16905                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16906
16907         /* The led_ctrl is set during tg3_phy_probe, here we might
16908          * have to force the link status polling mechanism based
16909          * upon subsystem IDs.
16910          */
16911         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16912             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16913             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16914                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16915                 tg3_flag_set(tp, USE_LINKCHG_REG);
16916         }
16917
16918         /* For all SERDES we poll the MAC status register. */
16919         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16920                 tg3_flag_set(tp, POLL_SERDES);
16921         else
16922                 tg3_flag_clear(tp, POLL_SERDES);
16923
16924         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16925                 tg3_flag_set(tp, POLL_CPMU_LINK);
16926
16927         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16928         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16929         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16930             tg3_flag(tp, PCIX_MODE)) {
16931                 tp->rx_offset = NET_SKB_PAD;
16932 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16933                 tp->rx_copy_thresh = ~(u16)0;
16934 #endif
16935         }
16936
16937         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16938         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16939         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16940
16941         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16942
16943         /* Increment the rx prod index on the rx std ring by at most
16944          * 8 for these chips to workaround hw errata.
16945          */
16946         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16947             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16948             tg3_asic_rev(tp) == ASIC_REV_5755)
16949                 tp->rx_std_max_post = 8;
16950
16951         if (tg3_flag(tp, ASPM_WORKAROUND))
16952                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16953                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16954
16955         return err;
16956 }
16957
16958 #ifdef CONFIG_SPARC
16959 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16960 {
16961         struct net_device *dev = tp->dev;
16962         struct pci_dev *pdev = tp->pdev;
16963         struct device_node *dp = pci_device_to_OF_node(pdev);
16964         const unsigned char *addr;
16965         int len;
16966
16967         addr = of_get_property(dp, "local-mac-address", &len);
16968         if (addr && len == ETH_ALEN) {
16969                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16970                 return 0;
16971         }
16972         return -ENODEV;
16973 }
16974
16975 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16976 {
16977         struct net_device *dev = tp->dev;
16978
16979         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16980         return 0;
16981 }
16982 #endif
16983
/* Determine the device's permanent MAC address and store it in
 * tp->dev->dev_addr.  Sources are tried in decreasing order of
 * preference: OpenFirmware (sparc), the SSB core (embedded parts),
 * the SRAM mailbox written by bootcode, NVRAM, and finally whatever
 * the MAC address registers currently hold.  Returns 0 on success,
 * -EINVAL if no valid unicast address could be found anywhere.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;
        int err;

#ifdef CONFIG_SPARC
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
#endif

        if (tg3_flag(tp, IS_SSB_CORE)) {
                /* SSB-hosted cores keep the address in the SSB sprom. */
                err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
                if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
                        return 0;
        }

        /* Pick the NVRAM offset of this function's MAC address; dual-MAC
         * and multi-function chips keep a separate copy per function.
         */
        mac_offset = 0x7c;
        if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* NOTE(review): if the NVRAM arbitration lock can't be
                 * taken, reset the NVRAM state machine; otherwise the
                 * grab was only a probe, so release it immediately —
                 * presumably this clears stale arbitration state left by
                 * the other port; confirm against Broadcom docs.
                 */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        } else if (tg3_flag(tp, 5717_PLUS)) {
                if (tp->pci_fn & 1)
                        mac_offset = 0xcc;
                if (tp->pci_fn > 1)
                        mac_offset += 0x18c;
        } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        /* 0x484b is ASCII "HK" — the signature bootcode places in the
         * high word when it has populated the mailbox.
         */
        if ((hi >> 16) == 0x484b) {
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM.  The 6-byte address is stored
                 * big-endian across two 32-bit words: the low 2 bytes of
                 * hi followed by all 4 bytes of lo.
                 */
                if (!tg3_flag(tp, NO_NVRAM) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
                        memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
                        memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        /* Register layout is byte-reversed relative to
                         * the on-wire address order.
                         */
                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
                if (!tg3_get_default_macaddr_sparc(tp))
                        return 0;
#endif
                return -EINVAL;
        }
        return 0;
}
17065
/* "goal" values for tg3_calc_dma_bndry(): force DMA bursts to break at
 * every cache line, or only every few cache lines (0 = no constraint).
 */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2
17068
17069 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17070 {
17071         int cacheline_size;
17072         u8 byte;
17073         int goal;
17074
17075         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17076         if (byte == 0)
17077                 cacheline_size = 1024;
17078         else
17079                 cacheline_size = (int) byte * 4;
17080
17081         /* On 5703 and later chips, the boundary bits have no
17082          * effect.
17083          */
17084         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17085             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17086             !tg3_flag(tp, PCI_EXPRESS))
17087                 goto out;
17088
17089 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17090         goal = BOUNDARY_MULTI_CACHELINE;
17091 #else
17092 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17093         goal = BOUNDARY_SINGLE_CACHELINE;
17094 #else
17095         goal = 0;
17096 #endif
17097 #endif
17098
17099         if (tg3_flag(tp, 57765_PLUS)) {
17100                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17101                 goto out;
17102         }
17103
17104         if (!goal)
17105                 goto out;
17106
17107         /* PCI controllers on most RISC systems tend to disconnect
17108          * when a device tries to burst across a cache-line boundary.
17109          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17110          *
17111          * Unfortunately, for PCI-E there are only limited
17112          * write-side controls for this, and thus for reads
17113          * we will still get the disconnects.  We'll also waste
17114          * these PCI cycles for both read and write for chips
17115          * other than 5700 and 5701 which do not implement the
17116          * boundary bits.
17117          */
17118         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17119                 switch (cacheline_size) {
17120                 case 16:
17121                 case 32:
17122                 case 64:
17123                 case 128:
17124                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17125                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17126                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17127                         } else {
17128                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17129                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17130                         }
17131                         break;
17132
17133                 case 256:
17134                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17135                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17136                         break;
17137
17138                 default:
17139                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17140                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17141                         break;
17142                 }
17143         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17144                 switch (cacheline_size) {
17145                 case 16:
17146                 case 32:
17147                 case 64:
17148                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17149                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17150                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17151                                 break;
17152                         }
17153                         /* fallthrough */
17154                 case 128:
17155                 default:
17156                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17157                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17158                         break;
17159                 }
17160         } else {
17161                 switch (cacheline_size) {
17162                 case 16:
17163                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17164                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17165                                         DMA_RWCTRL_WRITE_BNDRY_16);
17166                                 break;
17167                         }
17168                         /* fallthrough */
17169                 case 32:
17170                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17171                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17172                                         DMA_RWCTRL_WRITE_BNDRY_32);
17173                                 break;
17174                         }
17175                         /* fallthrough */
17176                 case 64:
17177                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17178                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17179                                         DMA_RWCTRL_WRITE_BNDRY_64);
17180                                 break;
17181                         }
17182                         /* fallthrough */
17183                 case 128:
17184                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17185                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17186                                         DMA_RWCTRL_WRITE_BNDRY_128);
17187                                 break;
17188                         }
17189                         /* fallthrough */
17190                 case 256:
17191                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17192                                 DMA_RWCTRL_WRITE_BNDRY_256);
17193                         break;
17194                 case 512:
17195                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17196                                 DMA_RWCTRL_WRITE_BNDRY_512);
17197                         break;
17198                 case 1024:
17199                 default:
17200                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17201                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17202                         break;
17203                 }
17204         }
17205
17206 out:
17207         return val;
17208 }
17209
/* Run one host<->NIC DMA transfer of @size bytes between @buf (at bus
 * address @buf_dma) and a fixed NIC SRAM mbuf, using a hand-built
 * internal buffer descriptor written through the PCI memory window.
 *
 * @to_device: true  = host memory -> chip (read DMA engine),
 *             false = chip -> host memory (write DMA engine).
 *
 * Returns 0 if the corresponding completion FIFO reports the descriptor
 * within the poll window, -ENODEV on timeout.  Used by tg3_test_dma()
 * to probe for host-bridge DMA quirks.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce: clear completion FIFOs and both DMA engine statuses,
	 * then park the buffer manager and reset the FTQs.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal descriptor: host buffer address, a fixed
	 * NIC mbuf target, and the transfer length.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one dword at a time via the
	 * PCI memory window, then close the window (base back to 0).
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transfer by enqueueing the descriptor address on the
	 * appropriate DMA FTQ.
	 */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO (up to 40 * 100us) for our descriptor. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
17290
17291 #define TEST_BUFFER_SIZE        0x2000
17292
/* Host bridges known to expose the 5700/5701 write DMA bug even though
 * the loopback test in tg3_test_dma() passes; when one is present the
 * driver forces the safe 16-byte write boundary anyway.
 */
static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
17297
/* Compute and program the chip's DMA read/write control register
 * (TG3PCI_DMA_RW_CTRL), then - on 5700/5701 only - run a DMA loopback
 * test against a coherent buffer to detect the write DMA bug and, if
 * corruption is seen, fall back to a 16-byte write boundary.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be allocated,
 * or -ENODEV if DMA is irreparably broken.
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Start from the PCI read/write command codes, then let
	 * tg3_calc_dma_bndry() merge in the boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	/* Bus-type and ASIC-specific watermark settings. */
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	/* Clear the low nibble on 5703/5704 (shared with the PCI-X
	 * watermark bits set above).
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Only 5700/5701 need the loopback test below. */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: fill the buffer with a known pattern, DMA it to the chip
	 * and back, and verify.  On the first corruption, retry once with
	 * the 16-byte write boundary; if that also corrupts, give up.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
17471
17472 static void tg3_init_bufmgr_config(struct tg3 *tp)
17473 {
17474         if (tg3_flag(tp, 57765_PLUS)) {
17475                 tp->bufmgr_config.mbuf_read_dma_low_water =
17476                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17477                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17478                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17479                 tp->bufmgr_config.mbuf_high_water =
17480                         DEFAULT_MB_HIGH_WATER_57765;
17481
17482                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17483                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17484                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17485                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17486                 tp->bufmgr_config.mbuf_high_water_jumbo =
17487                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17488         } else if (tg3_flag(tp, 5705_PLUS)) {
17489                 tp->bufmgr_config.mbuf_read_dma_low_water =
17490                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17491                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17492                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17493                 tp->bufmgr_config.mbuf_high_water =
17494                         DEFAULT_MB_HIGH_WATER_5705;
17495                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17496                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17497                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17498                         tp->bufmgr_config.mbuf_high_water =
17499                                 DEFAULT_MB_HIGH_WATER_5906;
17500                 }
17501
17502                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17503                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17504                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17505                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17506                 tp->bufmgr_config.mbuf_high_water_jumbo =
17507                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17508         } else {
17509                 tp->bufmgr_config.mbuf_read_dma_low_water =
17510                         DEFAULT_MB_RDMA_LOW_WATER;
17511                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17512                         DEFAULT_MB_MACRX_LOW_WATER;
17513                 tp->bufmgr_config.mbuf_high_water =
17514                         DEFAULT_MB_HIGH_WATER;
17515
17516                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17517                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17518                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17519                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17520                 tp->bufmgr_config.mbuf_high_water_jumbo =
17521                         DEFAULT_MB_HIGH_WATER_JUMBO;
17522         }
17523
17524         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17525         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17526 }
17527
17528 static char *tg3_phy_string(struct tg3 *tp)
17529 {
17530         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17531         case TG3_PHY_ID_BCM5400:        return "5400";
17532         case TG3_PHY_ID_BCM5401:        return "5401";
17533         case TG3_PHY_ID_BCM5411:        return "5411";
17534         case TG3_PHY_ID_BCM5701:        return "5701";
17535         case TG3_PHY_ID_BCM5703:        return "5703";
17536         case TG3_PHY_ID_BCM5704:        return "5704";
17537         case TG3_PHY_ID_BCM5705:        return "5705";
17538         case TG3_PHY_ID_BCM5750:        return "5750";
17539         case TG3_PHY_ID_BCM5752:        return "5752";
17540         case TG3_PHY_ID_BCM5714:        return "5714";
17541         case TG3_PHY_ID_BCM5780:        return "5780";
17542         case TG3_PHY_ID_BCM5755:        return "5755";
17543         case TG3_PHY_ID_BCM5787:        return "5787";
17544         case TG3_PHY_ID_BCM5784:        return "5784";
17545         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17546         case TG3_PHY_ID_BCM5906:        return "5906";
17547         case TG3_PHY_ID_BCM5761:        return "5761";
17548         case TG3_PHY_ID_BCM5718C:       return "5718C";
17549         case TG3_PHY_ID_BCM5718S:       return "5718S";
17550         case TG3_PHY_ID_BCM57765:       return "57765";
17551         case TG3_PHY_ID_BCM5719C:       return "5719C";
17552         case TG3_PHY_ID_BCM5720C:       return "5720C";
17553         case TG3_PHY_ID_BCM5762:        return "5762C";
17554         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17555         case 0:                 return "serdes";
17556         default:                return "unknown";
17557         }
17558 }
17559
17560 static char *tg3_bus_string(struct tg3 *tp, char *str)
17561 {
17562         if (tg3_flag(tp, PCI_EXPRESS)) {
17563                 strcpy(str, "PCI Express");
17564                 return str;
17565         } else if (tg3_flag(tp, PCIX_MODE)) {
17566                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17567
17568                 strcpy(str, "PCIX:");
17569
17570                 if ((clock_ctrl == 7) ||
17571                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17572                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17573                         strcat(str, "133MHz");
17574                 else if (clock_ctrl == 0)
17575                         strcat(str, "33MHz");
17576                 else if (clock_ctrl == 2)
17577                         strcat(str, "50MHz");
17578                 else if (clock_ctrl == 4)
17579                         strcat(str, "66MHz");
17580                 else if (clock_ctrl == 6)
17581                         strcat(str, "100MHz");
17582         } else {
17583                 strcpy(str, "PCI:");
17584                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17585                         strcat(str, "66MHz");
17586                 else
17587                         strcat(str, "33MHz");
17588         }
17589         if (tg3_flag(tp, PCI_32BIT))
17590                 strcat(str, ":32-bit");
17591         else
17592                 strcat(str, ":64-bit");
17593         return str;
17594 }
17595
17596 static void tg3_init_coal(struct tg3 *tp)
17597 {
17598         struct ethtool_coalesce *ec = &tp->coal;
17599
17600         memset(ec, 0, sizeof(*ec));
17601         ec->cmd = ETHTOOL_GCOALESCE;
17602         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17603         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17604         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17605         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17606         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17607         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17608         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17609         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17610         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17611
17612         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17613                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17614                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17615                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17616                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17617                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17618         }
17619
17620         if (tg3_flag(tp, 5705_PLUS)) {
17621                 ec->rx_coalesce_usecs_irq = 0;
17622                 ec->tx_coalesce_usecs_irq = 0;
17623                 ec->stats_block_coalesce_usecs = 0;
17624         }
17625 }
17626
17627 static int tg3_init_one(struct pci_dev *pdev,
17628                                   const struct pci_device_id *ent)
17629 {
17630         struct net_device *dev;
17631         struct tg3 *tp;
17632         int i, err;
17633         u32 sndmbx, rcvmbx, intmbx;
17634         char str[40];
17635         u64 dma_mask, persist_dma_mask;
17636         netdev_features_t features = 0;
17637
17638         printk_once(KERN_INFO "%s\n", version);
17639
17640         err = pci_enable_device(pdev);
17641         if (err) {
17642                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17643                 return err;
17644         }
17645
17646         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17647         if (err) {
17648                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17649                 goto err_out_disable_pdev;
17650         }
17651
17652         pci_set_master(pdev);
17653
17654         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17655         if (!dev) {
17656                 err = -ENOMEM;
17657                 goto err_out_free_res;
17658         }
17659
17660         SET_NETDEV_DEV(dev, &pdev->dev);
17661
17662         tp = netdev_priv(dev);
17663         tp->pdev = pdev;
17664         tp->dev = dev;
17665         tp->rx_mode = TG3_DEF_RX_MODE;
17666         tp->tx_mode = TG3_DEF_TX_MODE;
17667         tp->irq_sync = 1;
17668         tp->pcierr_recovery = false;
17669
17670         if (tg3_debug > 0)
17671                 tp->msg_enable = tg3_debug;
17672         else
17673                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17674
17675         if (pdev_is_ssb_gige_core(pdev)) {
17676                 tg3_flag_set(tp, IS_SSB_CORE);
17677                 if (ssb_gige_must_flush_posted_writes(pdev))
17678                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17679                 if (ssb_gige_one_dma_at_once(pdev))
17680                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17681                 if (ssb_gige_have_roboswitch(pdev)) {
17682                         tg3_flag_set(tp, USE_PHYLIB);
17683                         tg3_flag_set(tp, ROBOSWITCH);
17684                 }
17685                 if (ssb_gige_is_rgmii(pdev))
17686                         tg3_flag_set(tp, RGMII_MODE);
17687         }
17688
17689         /* The word/byte swap controls here control register access byte
17690          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17691          * setting below.
17692          */
17693         tp->misc_host_ctrl =
17694                 MISC_HOST_CTRL_MASK_PCI_INT |
17695                 MISC_HOST_CTRL_WORD_SWAP |
17696                 MISC_HOST_CTRL_INDIR_ACCESS |
17697                 MISC_HOST_CTRL_PCISTATE_RW;
17698
17699         /* The NONFRM (non-frame) byte/word swap controls take effect
17700          * on descriptor entries, anything which isn't packet data.
17701          *
17702          * The StrongARM chips on the board (one for tx, one for rx)
17703          * are running in big-endian mode.
17704          */
17705         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17706                         GRC_MODE_WSWAP_NONFRM_DATA);
17707 #ifdef __BIG_ENDIAN
17708         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17709 #endif
17710         spin_lock_init(&tp->lock);
17711         spin_lock_init(&tp->indirect_lock);
17712         INIT_WORK(&tp->reset_task, tg3_reset_task);
17713
17714         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17715         if (!tp->regs) {
17716                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17717                 err = -ENOMEM;
17718                 goto err_out_free_dev;
17719         }
17720
17721         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17722             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17723             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17724             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17725             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17726             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17727             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17728             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17729             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17730             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17731             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17732             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17733             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17734             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17735             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17736                 tg3_flag_set(tp, ENABLE_APE);
17737                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17738                 if (!tp->aperegs) {
17739                         dev_err(&pdev->dev,
17740                                 "Cannot map APE registers, aborting\n");
17741                         err = -ENOMEM;
17742                         goto err_out_iounmap;
17743                 }
17744         }
17745
17746         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17747         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17748
17749         dev->ethtool_ops = &tg3_ethtool_ops;
17750         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17751         dev->netdev_ops = &tg3_netdev_ops;
17752         dev->irq = pdev->irq;
17753
17754         err = tg3_get_invariants(tp, ent);
17755         if (err) {
17756                 dev_err(&pdev->dev,
17757                         "Problem fetching invariants of chip, aborting\n");
17758                 goto err_out_apeunmap;
17759         }
17760
17761         /* The EPB bridge inside 5714, 5715, and 5780 and any
17762          * device behind the EPB cannot support DMA addresses > 40-bit.
17763          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17764          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17765          * do DMA address check in tg3_start_xmit().
17766          */
17767         if (tg3_flag(tp, IS_5788))
17768                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17769         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17770                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17771 #ifdef CONFIG_HIGHMEM
17772                 dma_mask = DMA_BIT_MASK(64);
17773 #endif
17774         } else
17775                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17776
17777         /* Configure DMA attributes. */
17778         if (dma_mask > DMA_BIT_MASK(32)) {
17779                 err = pci_set_dma_mask(pdev, dma_mask);
17780                 if (!err) {
17781                         features |= NETIF_F_HIGHDMA;
17782                         err = pci_set_consistent_dma_mask(pdev,
17783                                                           persist_dma_mask);
17784                         if (err < 0) {
17785                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17786                                         "DMA for consistent allocations\n");
17787                                 goto err_out_apeunmap;
17788                         }
17789                 }
17790         }
17791         if (err || dma_mask == DMA_BIT_MASK(32)) {
17792                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17793                 if (err) {
17794                         dev_err(&pdev->dev,
17795                                 "No usable DMA configuration, aborting\n");
17796                         goto err_out_apeunmap;
17797                 }
17798         }
17799
17800         tg3_init_bufmgr_config(tp);
17801
17802         /* 5700 B0 chips do not support checksumming correctly due
17803          * to hardware bugs.
17804          */
17805         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17806                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17807
17808                 if (tg3_flag(tp, 5755_PLUS))
17809                         features |= NETIF_F_IPV6_CSUM;
17810         }
17811
17812         /* TSO is on by default on chips that support hardware TSO.
17813          * Firmware TSO on older chips gives lower performance, so it
17814          * is off by default, but can be enabled using ethtool.
17815          */
17816         if ((tg3_flag(tp, HW_TSO_1) ||
17817              tg3_flag(tp, HW_TSO_2) ||
17818              tg3_flag(tp, HW_TSO_3)) &&
17819             (features & NETIF_F_IP_CSUM))
17820                 features |= NETIF_F_TSO;
17821         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17822                 if (features & NETIF_F_IPV6_CSUM)
17823                         features |= NETIF_F_TSO6;
17824                 if (tg3_flag(tp, HW_TSO_3) ||
17825                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17826                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17827                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17828                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17829                     tg3_asic_rev(tp) == ASIC_REV_57780)
17830                         features |= NETIF_F_TSO_ECN;
17831         }
17832
17833         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17834                          NETIF_F_HW_VLAN_CTAG_RX;
17835         dev->vlan_features |= features;
17836
17837         /*
17838          * Add loopback capability only for a subset of devices that support
17839          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17840          * loopback for the remaining devices.
17841          */
17842         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17843             !tg3_flag(tp, CPMU_PRESENT))
17844                 /* Add the loopback capability */
17845                 features |= NETIF_F_LOOPBACK;
17846
17847         dev->hw_features |= features;
17848         dev->priv_flags |= IFF_UNICAST_FLT;
17849
17850         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17851         dev->min_mtu = TG3_MIN_MTU;
17852         dev->max_mtu = TG3_MAX_MTU(tp);
17853
17854         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17855             !tg3_flag(tp, TSO_CAPABLE) &&
17856             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17857                 tg3_flag_set(tp, MAX_RXPEND_64);
17858                 tp->rx_pending = 63;
17859         }
17860
17861         err = tg3_get_device_address(tp);
17862         if (err) {
17863                 dev_err(&pdev->dev,
17864                         "Could not obtain valid ethernet address, aborting\n");
17865                 goto err_out_apeunmap;
17866         }
17867
17868         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17869         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17870         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17871         for (i = 0; i < tp->irq_max; i++) {
17872                 struct tg3_napi *tnapi = &tp->napi[i];
17873
17874                 tnapi->tp = tp;
17875                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17876
17877                 tnapi->int_mbox = intmbx;
17878                 if (i <= 4)
17879                         intmbx += 0x8;
17880                 else
17881                         intmbx += 0x4;
17882
17883                 tnapi->consmbox = rcvmbx;
17884                 tnapi->prodmbox = sndmbx;
17885
17886                 if (i)
17887                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17888                 else
17889                         tnapi->coal_now = HOSTCC_MODE_NOW;
17890
17891                 if (!tg3_flag(tp, SUPPORT_MSIX))
17892                         break;
17893
17894                 /*
17895                  * If we support MSIX, we'll be using RSS.  If we're using
17896                  * RSS, the first vector only handles link interrupts and the
17897                  * remaining vectors handle rx and tx interrupts.  Reuse the
17898                  * mailbox values for the next iteration.  The values we setup
17899                  * above are still useful for the single vectored mode.
17900                  */
17901                 if (!i)
17902                         continue;
17903
17904                 rcvmbx += 0x8;
17905
17906                 if (sndmbx & 0x4)
17907                         sndmbx -= 0x4;
17908                 else
17909                         sndmbx += 0xc;
17910         }
17911
17912         /*
17913          * Reset chip in case UNDI or EFI driver did not shutdown
17914          * DMA self test will enable WDMAC and we'll see (spurious)
17915          * pending DMA on the PCI bus at that point.
17916          */
17917         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17918             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17919                 tg3_full_lock(tp, 0);
17920                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17921                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17922                 tg3_full_unlock(tp);
17923         }
17924
17925         err = tg3_test_dma(tp);
17926         if (err) {
17927                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17928                 goto err_out_apeunmap;
17929         }
17930
17931         tg3_init_coal(tp);
17932
17933         pci_set_drvdata(pdev, dev);
17934
17935         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17936             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17937             tg3_asic_rev(tp) == ASIC_REV_5762)
17938                 tg3_flag_set(tp, PTP_CAPABLE);
17939
17940         tg3_timer_init(tp);
17941
17942         tg3_carrier_off(tp);
17943
17944         err = register_netdev(dev);
17945         if (err) {
17946                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17947                 goto err_out_apeunmap;
17948         }
17949
17950         if (tg3_flag(tp, PTP_CAPABLE)) {
17951                 tg3_ptp_init(tp);
17952                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17953                                                    &tp->pdev->dev);
17954                 if (IS_ERR(tp->ptp_clock))
17955                         tp->ptp_clock = NULL;
17956         }
17957
17958         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17959                     tp->board_part_number,
17960                     tg3_chip_rev_id(tp),
17961                     tg3_bus_string(tp, str),
17962                     dev->dev_addr);
17963
17964         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17965                 char *ethtype;
17966
17967                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17968                         ethtype = "10/100Base-TX";
17969                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17970                         ethtype = "1000Base-SX";
17971                 else
17972                         ethtype = "10/100/1000Base-T";
17973
17974                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17975                             "(WireSpeed[%d], EEE[%d])\n",
17976                             tg3_phy_string(tp), ethtype,
17977                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17978                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17979         }
17980
17981         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17982                     (dev->features & NETIF_F_RXCSUM) != 0,
17983                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17984                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17985                     tg3_flag(tp, ENABLE_ASF) != 0,
17986                     tg3_flag(tp, TSO_CAPABLE) != 0);
17987         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17988                     tp->dma_rwctrl,
17989                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17990                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17991
17992         pci_save_state(pdev);
17993
17994         return 0;
17995
17996 err_out_apeunmap:
17997         if (tp->aperegs) {
17998                 iounmap(tp->aperegs);
17999                 tp->aperegs = NULL;
18000         }
18001
18002 err_out_iounmap:
18003         if (tp->regs) {
18004                 iounmap(tp->regs);
18005                 tp->regs = NULL;
18006         }
18007
18008 err_out_free_dev:
18009         free_netdev(dev);
18010
18011 err_out_free_res:
18012         pci_release_regions(pdev);
18013
18014 err_out_disable_pdev:
18015         if (pci_is_enabled(pdev))
18016                 pci_disable_device(pdev);
18017         return err;
18018 }
18019
18020 static void tg3_remove_one(struct pci_dev *pdev)
18021 {
18022         struct net_device *dev = pci_get_drvdata(pdev);
18023
18024         if (dev) {
18025                 struct tg3 *tp = netdev_priv(dev);
18026
18027                 tg3_ptp_fini(tp);
18028
18029                 release_firmware(tp->fw);
18030
18031                 tg3_reset_task_cancel(tp);
18032
18033                 if (tg3_flag(tp, USE_PHYLIB)) {
18034                         tg3_phy_fini(tp);
18035                         tg3_mdio_fini(tp);
18036                 }
18037
18038                 unregister_netdev(dev);
18039                 if (tp->aperegs) {
18040                         iounmap(tp->aperegs);
18041                         tp->aperegs = NULL;
18042                 }
18043                 if (tp->regs) {
18044                         iounmap(tp->regs);
18045                         tp->regs = NULL;
18046                 }
18047                 free_netdev(dev);
18048                 pci_release_regions(pdev);
18049                 pci_disable_device(pdev);
18050         }
18051 }
18052
18053 #ifdef CONFIG_PM_SLEEP
/* PM sleep hook: quiesce the interface and prepare the chip for its
 * low-power state.  If power-down preparation fails, the hardware and
 * the network stack are restarted so the device remains usable even
 * though the suspend transition is aborted.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		goto unlock;

	/* Stop all deferred/async activity before touching the chip. */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Second arg nonzero presumably syncs with in-flight interrupt
	 * handlers before disabling interrupts — see tg3_full_lock().
	 */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip; INIT_COMPLETE is cleared so resume knows a full
	 * re-init is required.
	 */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down failed: bring the hardware and the stack
		 * back up so the system keeps a working device.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen outside the full lock. */
		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	/* Returns the tg3_power_down_prepare() result, not err2. */
	return err;
}
18110
/* PM resume hook: re-initialize the hardware and restart the stack.
 * Mirrors tg3_suspend(); a non-zero return means the restart failed
 * and the interface is left attached but not started.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	/* Interface was down across suspend — nothing to restart. */
	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	/* Tell the APE management firmware we are re-initializing. */
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	/* Only reset the PHY if the link was not kept alive through
	 * power-down (TG3_PHYFLG_KEEP_LINK_ON_PWRDN clear).
	 */
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
18149 #endif /* CONFIG_PM_SLEEP */
18150
18151 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18152
/* PCI shutdown callback: detach and close the interface, and on a real
 * power-off also drop the chip into its low-power state.
 */
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	/* Only power down when the machine is actually going off —
	 * not on reboot, where the device may still be needed.
	 */
	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}
18169
18170 /**
18171  * tg3_io_error_detected - called when PCI error is detected
18172  * @pdev: Pointer to PCI device
18173  * @state: The current pci connection state
18174  *
18175  * This function is called after a PCI bus error affecting
18176  * this device has been detected.
18177  */
18178 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18179                                               pci_channel_state_t state)
18180 {
18181         struct net_device *netdev = pci_get_drvdata(pdev);
18182         struct tg3 *tp = netdev_priv(netdev);
18183         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18184
18185         netdev_info(netdev, "PCI I/O error detected\n");
18186
18187         rtnl_lock();
18188
18189         /* We probably don't have netdev yet */
18190         if (!netdev || !netif_running(netdev))
18191                 goto done;
18192
18193         /* We needn't recover from permanent error */
18194         if (state == pci_channel_io_frozen)
18195                 tp->pcierr_recovery = true;
18196
18197         tg3_phy_stop(tp);
18198
18199         tg3_netif_stop(tp);
18200
18201         tg3_timer_stop(tp);
18202
18203         /* Want to make sure that the reset task doesn't run */
18204         tg3_reset_task_cancel(tp);
18205
18206         netif_device_detach(netdev);
18207
18208         /* Clean up software state, even if MMIO is blocked */
18209         tg3_full_lock(tp, 0);
18210         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18211         tg3_full_unlock(tp);
18212
18213 done:
18214         if (state == pci_channel_io_perm_failure) {
18215                 if (netdev) {
18216                         tg3_napi_enable(tp);
18217                         dev_close(netdev);
18218                 }
18219                 err = PCI_ERS_RESULT_DISCONNECT;
18220         } else {
18221                 pci_disable_device(pdev);
18222         }
18223
18224         rtnl_unlock();
18225
18226         return err;
18227 }
18228
18229 /**
18230  * tg3_io_slot_reset - called after the pci bus has been reset.
18231  * @pdev: Pointer to PCI device
18232  *
18233  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
18235  * followed by fixups by BIOS, and has its config space
18236  * set up identically to what it was at cold boot.
18237  */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	/* Restore the config space saved at probe/suspend time, then
	 * re-save it so a subsequent error recovery starts clean.
	 */
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* No running interface: re-enabling the device is all that is
	 * needed for recovery.
	 */
	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	/* Recovery failed with a live interface: close it so the stack
	 * does not keep using a dead device.
	 */
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
18277
18278 /**
18279  * tg3_io_resume - called when traffic can start flowing again.
18280  * @pdev: Pointer to PCI device
18281  *
18282  * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
18284  */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	/* Interface down (or no netdev): nothing to restart, but still
	 * clear pcierr_recovery below.
	 */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* Full hardware re-init, mirroring the resume path. */
	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	tg3_phy_start(tp);

done:
	/* Recovery is over either way; allow normal resets again. */
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
18320
/* PCI error recovery (AER/EEH) callbacks; invoked by the PCI core in
 * error_detected -> slot_reset -> resume order.
 */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
18326
/* PCI driver glue: probe/remove, PM ops, shutdown and error handlers. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

/* Generates module init/exit that register/unregister tg3_driver. */
module_pci_driver(tg3_driver);