/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2016 Broadcom Corporation.
 *      Copyright (C) 2016-2017 Broadcom Ltd.
 *      Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *      refers to Broadcom Inc. and/or its subsidiaries.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
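/* e.g. with TG3_TX_RING_SIZE == 512, NEXT_TX(511) wraps to 0 via the
 * AND mask rather than a hardware modulo.
 */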

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

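/* Strings reported via ethtool -S; the order must stay in sync with
 * struct tg3_ethtool_stats in tg3.h.
 */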
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


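/* Basic MMIO accessors for the main register window (tp->regs) and the
 * APE register window (tp->aperegs).
 */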
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

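/* Indirect register access: the target offset goes out through the
 * TG3PCI_REG_BASE_ADDR config-space window and the data moves through
 * TG3PCI_REG_DATA, with indirect_lock serializing the two config cycles.
 */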
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

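/* Write a register and read it back: the read forces the posted PCI
 * write to complete before we return.
 */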
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

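/* Read/write NIC on-chip SRAM through the memory window selected by
 * TG3PCI_MEM_WIN_BASE_ADDR and accessed via TG3PCI_MEM_WIN_DATA.
 */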
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

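/* Acquire one of the hardware semaphores arbitrating access with the
 * APE firmware: post the request bit, then poll the grant register for
 * up to 1 millisecond.
 */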
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 milliseconds for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
                                   unsigned long interval)
{
        /* Skip if the heartbeat interval hasn't elapsed yet. */
        if (!tg3_flag(tp, ENABLE_APE) ||
            time_before(jiffies, tp->ape_hb_jiffies + interval))
                return;

        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
        tp->ape_hb_jiffies = jiffies;
}

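/* Writing 0x00000001 to a vector's interrupt mailbox holds its
 * interrupts off; writing the last status tag back re-arms it.
 */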
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

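/* MII management access goes through MAC_MI_COM: frame the PHY and
 * register addresses plus the command, kick off the transaction, and
 * poll MI_COM_BUSY until it clears or PHY_BUSY_LOOPS expires.
 */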
#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

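/* Clause 45 access through the Clause 22 MMD shadow registers: select
 * the MMD device, latch the register address, then move data with the
 * no-post-increment command.
 */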
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
                int addr;

                addr = ssb_gige_get_phyaddr(tp->pdev);
                if (addr < 0)
                        return addr;
                tp->phy_addr = addr;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
1550         tp->mdio_bus->parent   = &tp->pdev->dev;
1551         tp->mdio_bus->read     = &tg3_mdio_read;
1552         tp->mdio_bus->write    = &tg3_mdio_write;
1553         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1554
1555         /* The bus registration will look for all the PHYs on the mdio bus.
1556          * Unfortunately, it does not ensure the PHY is powered up before
1557          * accessing the PHY ID registers.  A chip reset is the
1558          * quickest way to bring the device back to an operational state.
1559          */
1560         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1561                 tg3_bmcr_reset(tp);
1562
1563         i = mdiobus_register(tp->mdio_bus);
1564         if (i) {
1565                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1566                 mdiobus_free(tp->mdio_bus);
1567                 return i;
1568         }
1569
1570         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1571
1572         if (!phydev || !phydev->drv) {
1573                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1574                 mdiobus_unregister(tp->mdio_bus);
1575                 mdiobus_free(tp->mdio_bus);
1576                 return -ENODEV;
1577         }
1578
1579         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1580         case PHY_ID_BCM57780:
1581                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1582                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583                 break;
1584         case PHY_ID_BCM50610:
1585         case PHY_ID_BCM50610M:
1586                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1587                                      PHY_BRCM_RX_REFCLK_UNUSED |
1588                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1589                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1590                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1591                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1592                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1593                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1594                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1595                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1596                 /* fall through */
1597         case PHY_ID_RTL8211C:
1598                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1599                 break;
1600         case PHY_ID_RTL8201E:
1601         case PHY_ID_BCMAC131:
1602                 phydev->interface = PHY_INTERFACE_MODE_MII;
1603                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1604                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1605                 break;
1606         }
1607
1608         tg3_flag_set(tp, MDIOBUS_INITED);
1609
1610         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1611                 tg3_mdio_config_5785(tp);
1612
1613         return 0;
1614 }
1615
1616 static void tg3_mdio_fini(struct tg3 *tp)
1617 {
1618         if (tg3_flag(tp, MDIOBUS_INITED)) {
1619                 tg3_flag_clear(tp, MDIOBUS_INITED);
1620                 mdiobus_unregister(tp->mdio_bus);
1621                 mdiobus_free(tp->mdio_bus);
1622         }
1623 }
1624
1625 /* tp->lock is held. */
1626 static inline void tg3_generate_fw_event(struct tg3 *tp)
1627 {
1628         u32 val;
1629
1630         val = tr32(GRC_RX_CPU_EVENT);
1631         val |= GRC_RX_CPU_DRIVER_EVENT;
1632         tw32_f(GRC_RX_CPU_EVENT, val);
1633
1634         tp->last_event_jiffies = jiffies;
1635 }
1636
1637 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1638
1639 /* tp->lock is held. */
1640 static void tg3_wait_for_event_ack(struct tg3 *tp)
1641 {
1642         int i;
1643         unsigned int delay_cnt;
1644         long time_remain;
1645
1646         /* If enough time has passed, no wait is necessary. */
1647         time_remain = (long)(tp->last_event_jiffies + 1 +
1648                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1649                       (long)jiffies;
1650         if (time_remain < 0)
1651                 return;
1652
1653         /* Check if we can shorten the wait time. */
1654         delay_cnt = jiffies_to_usecs(time_remain);
1655         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1656                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1657         delay_cnt = (delay_cnt >> 3) + 1;
1658
1659         for (i = 0; i < delay_cnt; i++) {
1660                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1661                         break;
1662                 if (pci_channel_offline(tp->pdev))
1663                         break;
1664
1665                 udelay(8);
1666         }
1667 }
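
/* Side note on the arithmetic above, restated as a hedged, self-contained
 * helper (the name is illustrative only): casting the jiffies difference
 * to signed long is what makes the deadline test wrap-safe, since a
 * wrapped counter still yields a negative remainder once the deadline
 * has passed.
 */
static long __maybe_unused example_jiffies_remaining(unsigned long deadline)
{
        return (long)deadline - (long)jiffies;  /* < 0 => deadline passed */
}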
1668
1669 /* tp->lock is held. */
1670 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1671 {
1672         u32 reg, val;
1673
1674         val = 0;
1675         if (!tg3_readphy(tp, MII_BMCR, &reg))
1676                 val = reg << 16;
1677         if (!tg3_readphy(tp, MII_BMSR, &reg))
1678                 val |= (reg & 0xffff);
1679         *data++ = val;
1680
1681         val = 0;
1682         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1683                 val = reg << 16;
1684         if (!tg3_readphy(tp, MII_LPA, &reg))
1685                 val |= (reg & 0xffff);
1686         *data++ = val;
1687
1688         val = 0;
1689         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1690                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1691                         val = reg << 16;
1692                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1693                         val |= (reg & 0xffff);
1694         }
1695         *data++ = val;
1696
1697         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1698                 val = reg << 16;
1699         else
1700                 val = 0;
1701         *data++ = val;
1702 }
1703
1704 /* tp->lock is held. */
1705 static void tg3_ump_link_report(struct tg3 *tp)
1706 {
1707         u32 data[4];
1708
1709         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1710                 return;
1711
1712         tg3_phy_gather_ump_data(tp, data);
1713
1714         tg3_wait_for_event_ack(tp);
1715
1716         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1717         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1718         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1719         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1720         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1721         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1722
1723         tg3_generate_fw_event(tp);
1724 }
1725
1726 /* tp->lock is held. */
1727 static void tg3_stop_fw(struct tg3 *tp)
1728 {
1729         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1730                 /* Wait for RX cpu to ACK the previous event. */
1731                 tg3_wait_for_event_ack(tp);
1732
1733                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1734
1735                 tg3_generate_fw_event(tp);
1736
1737                 /* Wait for RX cpu to ACK this event. */
1738                 tg3_wait_for_event_ack(tp);
1739         }
1740 }
1741
1742 /* tp->lock is held. */
1743 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1744 {
1745         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1746                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1747
1748         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1749                 switch (kind) {
1750                 case RESET_KIND_INIT:
1751                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1752                                       DRV_STATE_START);
1753                         break;
1754
1755                 case RESET_KIND_SHUTDOWN:
1756                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1757                                       DRV_STATE_UNLOAD);
1758                         break;
1759
1760                 case RESET_KIND_SUSPEND:
1761                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1762                                       DRV_STATE_SUSPEND);
1763                         break;
1764
1765                 default:
1766                         break;
1767                 }
1768         }
1769 }
1770
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1773 {
1774         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1775                 switch (kind) {
1776                 case RESET_KIND_INIT:
1777                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1778                                       DRV_STATE_START_DONE);
1779                         break;
1780
1781                 case RESET_KIND_SHUTDOWN:
1782                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783                                       DRV_STATE_UNLOAD_DONE);
1784                         break;
1785
1786                 default:
1787                         break;
1788                 }
1789         }
1790 }
1791
1792 /* tp->lock is held. */
1793 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1794 {
1795         if (tg3_flag(tp, ENABLE_ASF)) {
1796                 switch (kind) {
1797                 case RESET_KIND_INIT:
1798                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1799                                       DRV_STATE_START);
1800                         break;
1801
1802                 case RESET_KIND_SHUTDOWN:
1803                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1804                                       DRV_STATE_UNLOAD);
1805                         break;
1806
1807                 case RESET_KIND_SUSPEND:
1808                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1809                                       DRV_STATE_SUSPEND);
1810                         break;
1811
1812                 default:
1813                         break;
1814                 }
1815         }
1816 }
1817
1818 static int tg3_poll_fw(struct tg3 *tp)
1819 {
1820         int i;
1821         u32 val;
1822
1823         if (tg3_flag(tp, NO_FWARE_REPORTED))
1824                 return 0;
1825
1826         if (tg3_flag(tp, IS_SSB_CORE)) {
1827                 /* We don't use firmware. */
1828                 return 0;
1829         }
1830
1831         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1832                 /* Wait up to 20ms for init done. */
1833                 for (i = 0; i < 200; i++) {
1834                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1835                                 return 0;
1836                         if (pci_channel_offline(tp->pdev))
1837                                 return -ENODEV;
1838
1839                         udelay(100);
1840                 }
1841                 return -ENODEV;
1842         }
1843
1844         /* Wait for firmware initialization to complete. */
1845         for (i = 0; i < 100000; i++) {
1846                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1847                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1848                         break;
1849                 if (pci_channel_offline(tp->pdev)) {
1850                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1851                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1852                                 netdev_info(tp->dev, "No firmware running\n");
1853                         }
1854
1855                         break;
1856                 }
1857
1858                 udelay(10);
1859         }
1860
1861         /* Chip might not be fitted with firmware.  Some Sun onboard
1862          * parts are configured like that.  So don't signal the timeout
1863          * of the above loop as an error, but do report the lack of
1864          * running firmware once.
1865          */
1866         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1867                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1868
1869                 netdev_info(tp->dev, "No firmware running\n");
1870         }
1871
1872         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1873                 /* The 57765 A0 needs a little more
1874                  * time to do some important work.
1875                  */
1876                 mdelay(10);
1877         }
1878
1879         return 0;
1880 }
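
/* Handshake note (inferred from the code above): tg3_write_sig_pre_reset()
 * deposits NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in the firmware mailbox before a
 * reset, and the bootcode acknowledges by writing back its one's
 * complement -- hence the ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1 comparison in
 * tg3_poll_fw()'s polling loop.
 */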
1881
1882 static void tg3_link_report(struct tg3 *tp)
1883 {
1884         if (!netif_carrier_ok(tp->dev)) {
1885                 netif_info(tp, link, tp->dev, "Link is down\n");
1886                 tg3_ump_link_report(tp);
1887         } else if (netif_msg_link(tp)) {
1888                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1889                             (tp->link_config.active_speed == SPEED_1000 ?
1890                              1000 :
1891                              (tp->link_config.active_speed == SPEED_100 ?
1892                               100 : 10)),
1893                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1894                              "full" : "half"));
1895
1896                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1897                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1898                             "on" : "off",
1899                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1900                             "on" : "off");
1901
1902                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1903                         netdev_info(tp->dev, "EEE is %s\n",
1904                                     tp->setlpicnt ? "enabled" : "disabled");
1905
1906                 tg3_ump_link_report(tp);
1907         }
1908
1909         tp->link_up = netif_carrier_ok(tp->dev);
1910 }
1911
1912 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1913 {
1914         u32 flowctrl = 0;
1915
1916         if (adv & ADVERTISE_PAUSE_CAP) {
1917                 flowctrl |= FLOW_CTRL_RX;
1918                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1919                         flowctrl |= FLOW_CTRL_TX;
1920         } else if (adv & ADVERTISE_PAUSE_ASYM)
1921                 flowctrl |= FLOW_CTRL_TX;
1922
1923         return flowctrl;
1924 }
1925
1926 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1927 {
1928         u16 miireg;
1929
1930         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1931                 miireg = ADVERTISE_1000XPAUSE;
1932         else if (flow_ctrl & FLOW_CTRL_TX)
1933                 miireg = ADVERTISE_1000XPSE_ASYM;
1934         else if (flow_ctrl & FLOW_CTRL_RX)
1935                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1936         else
1937                 miireg = 0;
1938
1939         return miireg;
1940 }
1941
1942 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1943 {
1944         u32 flowctrl = 0;
1945
1946         if (adv & ADVERTISE_1000XPAUSE) {
1947                 flowctrl |= FLOW_CTRL_RX;
1948                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1949                         flowctrl |= FLOW_CTRL_TX;
1950         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1951                 flowctrl |= FLOW_CTRL_TX;
1952
1953         return flowctrl;
1954 }
1955
1956 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1957 {
1958         u8 cap = 0;
1959
1960         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1961                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1962         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1963                 if (lcladv & ADVERTISE_1000XPAUSE)
1964                         cap = FLOW_CTRL_RX;
1965                 if (rmtadv & ADVERTISE_1000XPAUSE)
1966                         cap = FLOW_CTRL_TX;
1967         }
1968
1969         return cap;
1970 }
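
/* Worked example for the resolution above (illustrative values):
 *   lcladv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM
 *   rmtadv = ADVERTISE_1000XPSE_ASYM
 * Both ends advertise asymmetric pause but only the local end advertises
 * symmetric pause, so the result is FLOW_CTRL_RX.  Swapping the two
 * advertisements yields FLOW_CTRL_TX, and symmetric pause on both sides
 * yields FLOW_CTRL_TX | FLOW_CTRL_RX.
 */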
1971
1972 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1973 {
1974         u8 autoneg;
1975         u8 flowctrl = 0;
1976         u32 old_rx_mode = tp->rx_mode;
1977         u32 old_tx_mode = tp->tx_mode;
1978
1979         if (tg3_flag(tp, USE_PHYLIB))
1980                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1981         else
1982                 autoneg = tp->link_config.autoneg;
1983
1984         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1985                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1986                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1987                 else
1988                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1989         } else
1990                 flowctrl = tp->link_config.flowctrl;
1991
1992         tp->link_config.active_flowctrl = flowctrl;
1993
1994         if (flowctrl & FLOW_CTRL_RX)
1995                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1996         else
1997                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1998
1999         if (old_rx_mode != tp->rx_mode)
2000                 tw32_f(MAC_RX_MODE, tp->rx_mode);
2001
2002         if (flowctrl & FLOW_CTRL_TX)
2003                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2004         else
2005                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2006
2007         if (old_tx_mode != tp->tx_mode)
2008                 tw32_f(MAC_TX_MODE, tp->tx_mode);
2009 }
2010
2011 static void tg3_adjust_link(struct net_device *dev)
2012 {
2013         u8 oldflowctrl, linkmesg = 0;
2014         u32 mac_mode, lcl_adv, rmt_adv;
2015         struct tg3 *tp = netdev_priv(dev);
2016         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2017
2018         spin_lock_bh(&tp->lock);
2019
2020         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2021                                     MAC_MODE_HALF_DUPLEX);
2022
2023         oldflowctrl = tp->link_config.active_flowctrl;
2024
2025         if (phydev->link) {
2026                 lcl_adv = 0;
2027                 rmt_adv = 0;
2028
2029                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2030                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2031                 else if (phydev->speed == SPEED_1000 ||
2032                          tg3_asic_rev(tp) != ASIC_REV_5785)
2033                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2034                 else
2035                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2036
2037                 if (phydev->duplex == DUPLEX_HALF)
2038                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2039                 else {
2040                         lcl_adv = mii_advertise_flowctrl(
2041                                   tp->link_config.flowctrl);
2042
2043                         if (phydev->pause)
2044                                 rmt_adv = LPA_PAUSE_CAP;
2045                         if (phydev->asym_pause)
2046                                 rmt_adv |= LPA_PAUSE_ASYM;
2047                 }
2048
2049                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2050         } else
2051                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2052
2053         if (mac_mode != tp->mac_mode) {
2054                 tp->mac_mode = mac_mode;
2055                 tw32_f(MAC_MODE, tp->mac_mode);
2056                 udelay(40);
2057         }
2058
2059         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2060                 if (phydev->speed == SPEED_10)
2061                         tw32(MAC_MI_STAT,
2062                              MAC_MI_STAT_10MBPS_MODE |
2063                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2064                 else
2065                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2066         }
2067
2068         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2069                 tw32(MAC_TX_LENGTHS,
2070                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2071                       (6 << TX_LENGTHS_IPG_SHIFT) |
2072                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2073         else
2074                 tw32(MAC_TX_LENGTHS,
2075                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2076                       (6 << TX_LENGTHS_IPG_SHIFT) |
2077                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2078
2079         if (phydev->link != tp->old_link ||
2080             phydev->speed != tp->link_config.active_speed ||
2081             phydev->duplex != tp->link_config.active_duplex ||
2082             oldflowctrl != tp->link_config.active_flowctrl)
2083                 linkmesg = 1;
2084
2085         tp->old_link = phydev->link;
2086         tp->link_config.active_speed = phydev->speed;
2087         tp->link_config.active_duplex = phydev->duplex;
2088
2089         spin_unlock_bh(&tp->lock);
2090
2091         if (linkmesg)
2092                 tg3_link_report(tp);
2093 }
2094
2095 static int tg3_phy_init(struct tg3 *tp)
2096 {
2097         struct phy_device *phydev;
2098
2099         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2100                 return 0;
2101
2102         /* Bring the PHY back to a known state. */
2103         tg3_bmcr_reset(tp);
2104
2105         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2106
2107         /* Attach the MAC to the PHY. */
2108         phydev = phy_connect(tp->dev, phydev_name(phydev),
2109                              tg3_adjust_link, phydev->interface);
2110         if (IS_ERR(phydev)) {
2111                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2112                 return PTR_ERR(phydev);
2113         }
2114
2115         /* Mask with MAC supported features. */
2116         switch (phydev->interface) {
2117         case PHY_INTERFACE_MODE_GMII:
2118         case PHY_INTERFACE_MODE_RGMII:
2119                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2120                         phy_set_max_speed(phydev, SPEED_1000);
2121                         phy_support_asym_pause(phydev);
2122                         break;
2123                 }
2124                 /* fall through */
2125         case PHY_INTERFACE_MODE_MII:
2126                 phy_set_max_speed(phydev, SPEED_100);
2127                 phy_support_asym_pause(phydev);
2128                 break;
2129         default:
2130                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2131                 return -EINVAL;
2132         }
2133
2134         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2135
2136         phy_attached_info(phydev);
2137
2138         return 0;
2139 }
2140
2141 static void tg3_phy_start(struct tg3 *tp)
2142 {
2143         struct phy_device *phydev;
2144
2145         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2146                 return;
2147
2148         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2149
2150         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2151                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2152                 phydev->speed = tp->link_config.speed;
2153                 phydev->duplex = tp->link_config.duplex;
2154                 phydev->autoneg = tp->link_config.autoneg;
2155                 ethtool_convert_legacy_u32_to_link_mode(
2156                         phydev->advertising, tp->link_config.advertising);
2157         }
2158
2159         phy_start(phydev);
2160
2161         phy_start_aneg(phydev);
2162 }
2163
2164 static void tg3_phy_stop(struct tg3 *tp)
2165 {
2166         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2167                 return;
2168
2169         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2170 }
2171
2172 static void tg3_phy_fini(struct tg3 *tp)
2173 {
2174         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2175                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2176                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2177         }
2178 }
2179
2180 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2181 {
2182         int err;
2183         u32 val;
2184
2185         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2186                 return 0;
2187
2188         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2189                 /* Cannot do read-modify-write on 5401 */
2190                 err = tg3_phy_auxctl_write(tp,
2191                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2192                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2193                                            0x4c20);
2194                 goto done;
2195         }
2196
2197         err = tg3_phy_auxctl_read(tp,
2198                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2199         if (err)
2200                 return err;
2201
2202         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2203         err = tg3_phy_auxctl_write(tp,
2204                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2205
2206 done:
2207         return err;
2208 }
2209
2210 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2211 {
2212         u32 phytest;
2213
2214         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2215                 u32 phy;
2216
2217                 tg3_writephy(tp, MII_TG3_FET_TEST,
2218                              phytest | MII_TG3_FET_SHADOW_EN);
2219                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2220                         if (enable)
2221                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2222                         else
2223                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2224                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2225                 }
2226                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2227         }
2228 }
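
/* Access-pattern note: FET-class PHYs hide shadow registers behind
 * MII_TG3_FET_TEST.  Writing that register with MII_TG3_FET_SHADOW_EN set
 * maps the shadow bank in, and restoring the saved value maps it back
 * out; the same save/modify/restore sequence reappears in
 * tg3_phy_toggle_automdix() below.
 */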
2229
2230 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2231 {
2232         u32 reg;
2233
2234         if (!tg3_flag(tp, 5705_PLUS) ||
2235             (tg3_flag(tp, 5717_PLUS) &&
2236              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2237                 return;
2238
2239         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2240                 tg3_phy_fet_toggle_apd(tp, enable);
2241                 return;
2242         }
2243
2244         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2245               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2246               MII_TG3_MISC_SHDW_SCR5_SDTL |
2247               MII_TG3_MISC_SHDW_SCR5_C125OE;
2248         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2249                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2250
2251         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2252
2253
2254         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2255         if (enable)
2256                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2257
2258         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2259 }
2260
2261 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2262 {
2263         u32 phy;
2264
2265         if (!tg3_flag(tp, 5705_PLUS) ||
2266             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2267                 return;
2268
2269         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2270                 u32 ephy;
2271
2272                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2273                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2274
2275                         tg3_writephy(tp, MII_TG3_FET_TEST,
2276                                      ephy | MII_TG3_FET_SHADOW_EN);
2277                         if (!tg3_readphy(tp, reg, &phy)) {
2278                                 if (enable)
2279                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2280                                 else
2281                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2282                                 tg3_writephy(tp, reg, phy);
2283                         }
2284                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2285                 }
2286         } else {
2287                 int ret;
2288
2289                 ret = tg3_phy_auxctl_read(tp,
2290                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2291                 if (!ret) {
2292                         if (enable)
2293                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2294                         else
2295                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2296                         tg3_phy_auxctl_write(tp,
2297                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2298                 }
2299         }
2300 }
2301
2302 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2303 {
2304         int ret;
2305         u32 val;
2306
2307         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2308                 return;
2309
2310         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2311         if (!ret)
2312                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2313                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2314 }
2315
2316 static void tg3_phy_apply_otp(struct tg3 *tp)
2317 {
2318         u32 otp, phy;
2319
2320         if (!tp->phy_otp)
2321                 return;
2322
2323         otp = tp->phy_otp;
2324
2325         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2326                 return;
2327
2328         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2329         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2330         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2331
2332         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2333               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2334         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2335
2336         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2337         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2338         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2339
2340         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2341         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2342
2343         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2344         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2345
2346         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2347               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2348         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2349
2350         tg3_phy_toggle_auxctl_smdsp(tp, false);
2351 }
2352
2353 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2354 {
2355         u32 val;
2356         struct ethtool_eee *dest = &tp->eee;
2357
2358         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2359                 return;
2360
2361         if (eee)
2362                 dest = eee;
2363
2364         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2365                 return;
2366
2367         /* Pull eee_active */
2368         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2369             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2370                 dest->eee_active = 1;
2371         } else
2372                 dest->eee_active = 0;
2373
2374         /* Pull lp advertised settings */
2375         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2376                 return;
2377         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2378
2379         /* Pull advertised and eee_enabled settings */
2380         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2381                 return;
2382         dest->eee_enabled = !!val;
2383         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2384
2385         /* Pull tx_lpi_enabled */
2386         val = tr32(TG3_CPMU_EEE_MODE);
2387         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2388
2389         /* Pull lpi timer value */
2390         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2391 }
2392
2393 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2394 {
2395         u32 val;
2396
2397         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2398                 return;
2399
2400         tp->setlpicnt = 0;
2401
2402         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2403             current_link_up &&
2404             tp->link_config.active_duplex == DUPLEX_FULL &&
2405             (tp->link_config.active_speed == SPEED_100 ||
2406              tp->link_config.active_speed == SPEED_1000)) {
2407                 u32 eeectl;
2408
2409                 if (tp->link_config.active_speed == SPEED_1000)
2410                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2411                 else
2412                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2413
2414                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2415
2416                 tg3_eee_pull_config(tp, NULL);
2417                 if (tp->eee.eee_active)
2418                         tp->setlpicnt = 2;
2419         }
2420
2421         if (!tp->setlpicnt) {
2422                 if (current_link_up &&
2423                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2424                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2425                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2426                 }
2427
2428                 val = tr32(TG3_CPMU_EEE_MODE);
2429                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2430         }
2431 }
2432
2433 static void tg3_phy_eee_enable(struct tg3 *tp)
2434 {
2435         u32 val;
2436
2437         if (tp->link_config.active_speed == SPEED_1000 &&
2438             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2439              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2440              tg3_flag(tp, 57765_CLASS)) &&
2441             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2442                 val = MII_TG3_DSP_TAP26_ALNOKO |
2443                       MII_TG3_DSP_TAP26_RMRXSTO;
2444                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2445                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2446         }
2447
2448         val = tr32(TG3_CPMU_EEE_MODE);
2449         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2450 }
2451
2452 static int tg3_wait_macro_done(struct tg3 *tp)
2453 {
2454         int limit = 100;
2455
2456         while (limit--) {
2457                 u32 tmp32;
2458
2459                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2460                         if ((tmp32 & 0x1000) == 0)
2461                                 break;
2462                 }
2463         }
2464         if (limit < 0)
2465                 return -EBUSY;
2466
2467         return 0;
2468 }
2469
2470 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2471 {
2472         static const u32 test_pat[4][6] = {
2473         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2474         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2475         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2476         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2477         };
2478         int chan;
2479
2480         for (chan = 0; chan < 4; chan++) {
2481                 int i;
2482
2483                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2484                              (chan * 0x2000) | 0x0200);
2485                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2486
2487                 for (i = 0; i < 6; i++)
2488                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2489                                      test_pat[chan][i]);
2490
2491                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2492                 if (tg3_wait_macro_done(tp)) {
2493                         *resetp = 1;
2494                         return -EBUSY;
2495                 }
2496
2497                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2498                              (chan * 0x2000) | 0x0200);
2499                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2500                 if (tg3_wait_macro_done(tp)) {
2501                         *resetp = 1;
2502                         return -EBUSY;
2503                 }
2504
2505                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2506                 if (tg3_wait_macro_done(tp)) {
2507                         *resetp = 1;
2508                         return -EBUSY;
2509                 }
2510
2511                 for (i = 0; i < 6; i += 2) {
2512                         u32 low, high;
2513
2514                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2515                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2516                             tg3_wait_macro_done(tp)) {
2517                                 *resetp = 1;
2518                                 return -EBUSY;
2519                         }
2520                         low &= 0x7fff;
2521                         high &= 0x000f;
2522                         if (low != test_pat[chan][i] ||
2523                             high != test_pat[chan][i+1]) {
2524                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2525                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2526                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2527
2528                                 return -EBUSY;
2529                         }
2530                 }
2531         }
2532
2533         return 0;
2534 }
2535
2536 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2537 {
2538         int chan;
2539
2540         for (chan = 0; chan < 4; chan++) {
2541                 int i;
2542
2543                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2544                              (chan * 0x2000) | 0x0200);
2545                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2546                 for (i = 0; i < 6; i++)
2547                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2548                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2549                 if (tg3_wait_macro_done(tp))
2550                         return -EBUSY;
2551         }
2552
2553         return 0;
2554 }
2555
2556 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2557 {
2558         u32 reg32, phy9_orig;
2559         int retries, do_phy_reset, err;
2560
2561         retries = 10;
2562         do_phy_reset = 1;
2563         do {
2564                 if (do_phy_reset) {
2565                         err = tg3_bmcr_reset(tp);
2566                         if (err)
2567                                 return err;
2568                         do_phy_reset = 0;
2569                 }
2570
2571                 /* Disable transmitter and interrupt.  */
2572                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2573                         continue;
2574
2575                 reg32 |= 0x3000;
2576                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2577
2578                 /* Set full-duplex, 1000 Mbps.  */
2579                 tg3_writephy(tp, MII_BMCR,
2580                              BMCR_FULLDPLX | BMCR_SPEED1000);
2581
2582                 /* Set to master mode.  */
2583                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2584                         continue;
2585
2586                 tg3_writephy(tp, MII_CTRL1000,
2587                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2588
2589                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2590                 if (err)
2591                         return err;
2592
2593                 /* Block the PHY control access.  */
2594                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2595
2596                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2597                 if (!err)
2598                         break;
2599         } while (--retries);
2600
2601         err = tg3_phy_reset_chanpat(tp);
2602         if (err)
2603                 return err;
2604
2605         tg3_phydsp_write(tp, 0x8005, 0x0000);
2606
2607         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2608         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2609
2610         tg3_phy_toggle_auxctl_smdsp(tp, false);
2611
2612         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2613
2614         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2615         if (err)
2616                 return err;
2617
2618         reg32 &= ~0x3000;
2619         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2620
2621         return 0;
2622 }
2623
2624 static void tg3_carrier_off(struct tg3 *tp)
2625 {
2626         netif_carrier_off(tp->dev);
2627         tp->link_up = false;
2628 }
2629
2630 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2631 {
2632         if (tg3_flag(tp, ENABLE_ASF))
2633                 netdev_warn(tp->dev,
2634                             "Management side-band traffic will be interrupted during phy settings change\n");
2635 }
2636
2637 /* This will reset the tigon3 PHY and reapply the relevant
2638  * chip-specific workarounds; the reset is unconditional.
2639  */
2640 static int tg3_phy_reset(struct tg3 *tp)
2641 {
2642         u32 val, cpmuctrl;
2643         int err;
2644
2645         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2646                 val = tr32(GRC_MISC_CFG);
2647                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2648                 udelay(40);
2649         }
2650         err  = tg3_readphy(tp, MII_BMSR, &val);
2651         err |= tg3_readphy(tp, MII_BMSR, &val);
2652         if (err != 0)
2653                 return -EBUSY;
2654
2655         if (netif_running(tp->dev) && tp->link_up) {
2656                 netif_carrier_off(tp->dev);
2657                 tg3_link_report(tp);
2658         }
2659
2660         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2661             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2662             tg3_asic_rev(tp) == ASIC_REV_5705) {
2663                 err = tg3_phy_reset_5703_4_5(tp);
2664                 if (err)
2665                         return err;
2666                 goto out;
2667         }
2668
2669         cpmuctrl = 0;
2670         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2671             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2672                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2673                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2674                         tw32(TG3_CPMU_CTRL,
2675                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2676         }
2677
2678         err = tg3_bmcr_reset(tp);
2679         if (err)
2680                 return err;
2681
2682         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2683                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2684                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2685
2686                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2687         }
2688
2689         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2690             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2691                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2692                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2693                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2694                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2695                         udelay(40);
2696                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2697                 }
2698         }
2699
2700         if (tg3_flag(tp, 5717_PLUS) &&
2701             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2702                 return 0;
2703
2704         tg3_phy_apply_otp(tp);
2705
2706         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2707                 tg3_phy_toggle_apd(tp, true);
2708         else
2709                 tg3_phy_toggle_apd(tp, false);
2710
2711 out:
2712         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2713             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2715                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2716                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2717         }
2718
2719         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2720                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2721                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2722         }
2723
2724         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2725                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2726                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2727                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2728                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2729                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2730                 }
2731         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2732                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2733                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2734                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2735                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2736                                 tg3_writephy(tp, MII_TG3_TEST1,
2737                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2738                         } else
2739                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2740
2741                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2742                 }
2743         }
2744
2745         /* Set the extended packet length bit (bit 14) on all chips
2746          * that support jumbo frames. */
2747         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2748                 /* Cannot do read-modify-write on 5401 */
2749                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2750         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751                 /* Set bit 14 with read-modify-write to preserve other bits */
2752                 err = tg3_phy_auxctl_read(tp,
2753                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2754                 if (!err)
2755                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2756                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2757         }
2758
2759         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2760          * jumbo frame transmission.
2761          */
2762         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2763                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2764                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2765                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2766         }
2767
2768         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2769                 /* adjust output voltage */
2770                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2771         }
2772
2773         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2774                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2775
2776         tg3_phy_toggle_automdix(tp, true);
2777         tg3_phy_set_wirespeed(tp);
2778         return 0;
2779 }
2780
2781 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2782 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2783 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2784                                           TG3_GPIO_MSG_NEED_VAUX)
2785 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2786         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2787          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2788          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2789          (TG3_GPIO_MSG_DRVR_PRES << 12))
2790
2791 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2792         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2793          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2794          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2795          (TG3_GPIO_MSG_NEED_VAUX << 12))
2796
2797 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2798 {
2799         u32 status, shift;
2800
2801         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2802             tg3_asic_rev(tp) == ASIC_REV_5719)
2803                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2804         else
2805                 status = tr32(TG3_CPMU_DRV_STATUS);
2806
2807         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2808         status &= ~(TG3_GPIO_MSG_MASK << shift);
2809         status |= (newstat << shift);
2810
2811         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2812             tg3_asic_rev(tp) == ASIC_REV_5719)
2813                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2814         else
2815                 tw32(TG3_CPMU_DRV_STATUS, status);
2816
2817         return status >> TG3_APE_GPIO_MSG_SHIFT;
2818 }
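
/* Worked example (derived from the macros above): with
 * TG3_GPIO_MSG_DRVR_PRES == 0x1 per function, TG3_GPIO_MSG_ALL_DRVR_PRES_MASK
 * works out to 0x1111 and TG3_GPIO_MSG_ALL_NEED_VAUX_MASK to 0x2222 --
 * one 2-bit field every 4 bits, one field per PCI function.
 * tg3_set_function_status() rewrites only the caller's field (at
 * TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn) and returns the whole word shifted
 * back down, so callers such as tg3_frob_aux_power_5717() can test the
 * result directly against the ALL_* masks.
 */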
2819
2820 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2821 {
2822         if (!tg3_flag(tp, IS_NIC))
2823                 return 0;
2824
2825         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2826             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2827             tg3_asic_rev(tp) == ASIC_REV_5720) {
2828                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2829                         return -EIO;
2830
2831                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2832
2833                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2834                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2835
2836                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2837         } else {
2838                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2839                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2840         }
2841
2842         return 0;
2843 }
2844
2845 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2846 {
2847         u32 grc_local_ctrl;
2848
2849         if (!tg3_flag(tp, IS_NIC) ||
2850             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2851             tg3_asic_rev(tp) == ASIC_REV_5701)
2852                 return;
2853
2854         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2855
2856         tw32_wait_f(GRC_LOCAL_CTRL,
2857                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2858                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2859
2860         tw32_wait_f(GRC_LOCAL_CTRL,
2861                     grc_local_ctrl,
2862                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2863
2864         tw32_wait_f(GRC_LOCAL_CTRL,
2865                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2866                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2867 }
2868
2869 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2870 {
2871         if (!tg3_flag(tp, IS_NIC))
2872                 return;
2873
2874         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2875             tg3_asic_rev(tp) == ASIC_REV_5701) {
2876                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2877                             (GRC_LCLCTRL_GPIO_OE0 |
2878                              GRC_LCLCTRL_GPIO_OE1 |
2879                              GRC_LCLCTRL_GPIO_OE2 |
2880                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2881                              GRC_LCLCTRL_GPIO_OUTPUT1),
2882                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2883         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2884                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2885                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2886                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2887                                      GRC_LCLCTRL_GPIO_OE1 |
2888                                      GRC_LCLCTRL_GPIO_OE2 |
2889                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2890                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2891                                      tp->grc_local_ctrl;
2892                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2893                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2894
2895                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2896                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2897                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2898
2899                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2900                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2901                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2902         } else {
2903                 u32 no_gpio2;
2904                 u32 grc_local_ctrl = 0;
2905
2906                 /* Workaround to avoid drawing too much supply current. */
2907                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2908                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2909                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2910                                     grc_local_ctrl,
2911                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2912                 }
2913
2914                 /* On 5753 and variants, GPIO2 cannot be used. */
2915                 no_gpio2 = tp->nic_sram_data_cfg &
2916                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2917
2918                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2919                                   GRC_LCLCTRL_GPIO_OE1 |
2920                                   GRC_LCLCTRL_GPIO_OE2 |
2921                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2922                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2923                 if (no_gpio2) {
2924                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2925                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2926                 }
2927                 tw32_wait_f(GRC_LOCAL_CTRL,
2928                             tp->grc_local_ctrl | grc_local_ctrl,
2929                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2930
2931                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2932
2933                 tw32_wait_f(GRC_LOCAL_CTRL,
2934                             tp->grc_local_ctrl | grc_local_ctrl,
2935                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2936
2937                 if (!no_gpio2) {
2938                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2939                         tw32_wait_f(GRC_LOCAL_CTRL,
2940                                     tp->grc_local_ctrl | grc_local_ctrl,
2941                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2942                 }
2943         }
2944 }
2945
2946 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2947 {
2948         u32 msg = 0;
2949
2950         /* Serialize power state transitions */
2951         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2952                 return;
2953
2954         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2955                 msg = TG3_GPIO_MSG_NEED_VAUX;
2956
2957         msg = tg3_set_function_status(tp, msg);
2958
2959         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2960                 goto done;
2961
2962         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2963                 tg3_pwrsrc_switch_to_vaux(tp);
2964         else
2965                 tg3_pwrsrc_die_with_vmain(tp);
2966
2967 done:
2968         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2969 }
2970
2971 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2972 {
2973         bool need_vaux = false;
2974
2975         /* The GPIOs do something completely different on 57765. */
2976         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2977                 return;
2978
2979         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2980             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2981             tg3_asic_rev(tp) == ASIC_REV_5720) {
2982                 tg3_frob_aux_power_5717(tp, include_wol ?
2983                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2984                 return;
2985         }
2986
2987         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2988                 struct net_device *dev_peer;
2989
2990                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2991
2992                 /* remove_one() may have been run on the peer. */
2993                 if (dev_peer) {
2994                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2995
2996                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2997                                 return;
2998
2999                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3000                             tg3_flag(tp_peer, ENABLE_ASF))
3001                                 need_vaux = true;
3002                 }
3003         }
3004
3005         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3006             tg3_flag(tp, ENABLE_ASF))
3007                 need_vaux = true;
3008
3009         if (need_vaux)
3010                 tg3_pwrsrc_switch_to_vaux(tp);
3011         else
3012                 tg3_pwrsrc_die_with_vmain(tp);
3013 }
3014
3015 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3016 {
3017         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3018                 return 1;
3019         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3020                 if (speed != SPEED_10)
3021                         return 1;
3022         } else if (speed == SPEED_10)
3023                 return 1;
3024
3025         return 0;
3026 }
3027
3028 static bool tg3_phy_power_bug(struct tg3 *tp)
3029 {
3030         switch (tg3_asic_rev(tp)) {
3031         case ASIC_REV_5700:
3032         case ASIC_REV_5704:
3033                 return true;
3034         case ASIC_REV_5780:
3035                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3036                         return true;
3037                 return false;
3038         case ASIC_REV_5717:
3039                 if (!tp->pci_fn)
3040                         return true;
3041                 return false;
3042         case ASIC_REV_5719:
3043         case ASIC_REV_5720:
3044                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3045                     !tp->pci_fn)
3046                         return true;
3047                 return false;
3048         }
3049
3050         return false;
3051 }
3052
3053 static bool tg3_phy_led_bug(struct tg3 *tp)
3054 {
3055         switch (tg3_asic_rev(tp)) {
3056         case ASIC_REV_5719:
3057         case ASIC_REV_5720:
3058                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3059                     !tp->pci_fn)
3060                         return true;
3061                 return false;
3062         }
3063
3064         return false;
3065 }
3066
3067 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3068 {
3069         u32 val;
3070
3071         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3072                 return;
3073
3074         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3075                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3076                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3077                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3078
3079                         sg_dig_ctrl |=
3080                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3081                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3082                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3083                 }
3084                 return;
3085         }
3086
3087         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3088                 tg3_bmcr_reset(tp);
3089                 val = tr32(GRC_MISC_CFG);
3090                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3091                 udelay(40);
3092                 return;
3093         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3094                 u32 phytest;
3095                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3096                         u32 phy;
3097
3098                         tg3_writephy(tp, MII_ADVERTISE, 0);
3099                         tg3_writephy(tp, MII_BMCR,
3100                                      BMCR_ANENABLE | BMCR_ANRESTART);
3101
3102                         tg3_writephy(tp, MII_TG3_FET_TEST,
3103                                      phytest | MII_TG3_FET_SHADOW_EN);
3104                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3105                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3106                                 tg3_writephy(tp,
3107                                              MII_TG3_FET_SHDW_AUXMODE4,
3108                                              phy);
3109                         }
3110                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3111                 }
3112                 return;
3113         } else if (do_low_power) {
3114                 if (!tg3_phy_led_bug(tp))
3115                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3116                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3117
3118                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3119                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3120                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3121                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3122         }
3123
3124         /* The PHY should not be powered down on some chips because
3125          * of bugs.
3126          */
3127         if (tg3_phy_power_bug(tp))
3128                 return;
3129
3130         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3131             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3132                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3133                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3134                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3135                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3136         }
3137
3138         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3139 }
3140
3141 /* tp->lock is held. */
3142 static int tg3_nvram_lock(struct tg3 *tp)
3143 {
3144         if (tg3_flag(tp, NVRAM)) {
3145                 int i;
3146
3147                 if (tp->nvram_lock_cnt == 0) {
3148                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3149                         for (i = 0; i < 8000; i++) {
3150                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3151                                         break;
3152                                 udelay(20);
3153                         }
3154                         if (i == 8000) {
3155                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3156                                 return -ENODEV;
3157                         }
3158                 }
3159                 tp->nvram_lock_cnt++;
3160         }
3161         return 0;
3162 }
3163
3164 /* tp->lock is held. */
3165 static void tg3_nvram_unlock(struct tg3 *tp)
3166 {
3167         if (tg3_flag(tp, NVRAM)) {
3168                 if (tp->nvram_lock_cnt > 0)
3169                         tp->nvram_lock_cnt--;
3170                 if (tp->nvram_lock_cnt == 0)
3171                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3172         }
3173 }
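
/* Illustrative sketch (not driver code): a userspace model of the
 * refcounted arbitration implemented by tg3_nvram_lock()/unlock()
 * above. The hardware semaphore is requested only on the 0 -> 1
 * transition of nvram_lock_cnt and released only when the count
 * drops back to zero, so nested lock/unlock pairs stay balanced.
 * hw_sem_request()/hw_sem_release() are stand-ins for the
 * NVRAM_SWARB register handshake.
 */
#if 0   /* standalone example; compile and run separately */
#include <assert.h>
#include <stdbool.h>

static int lock_cnt;
static bool sem_held;

static bool hw_sem_request(void) { sem_held = true; return true; }
static void hw_sem_release(void) { sem_held = false; }

static int nvram_lock(void)
{
        if (lock_cnt == 0 && !hw_sem_request())
                return -1;              /* -ENODEV in the driver */
        lock_cnt++;
        return 0;
}

static void nvram_unlock(void)
{
        if (lock_cnt > 0)
                lock_cnt--;
        if (lock_cnt == 0)
                hw_sem_release();
}

int main(void)
{
        assert(nvram_lock() == 0);
        assert(nvram_lock() == 0);      /* nested: no second request */
        nvram_unlock();
        assert(sem_held);               /* outer lock still holds it */
        nvram_unlock();
        assert(!sem_held);              /* released on last unlock */
        return 0;
}
#endif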
3174
3175 /* tp->lock is held. */
3176 static void tg3_enable_nvram_access(struct tg3 *tp)
3177 {
3178         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3179                 u32 nvaccess = tr32(NVRAM_ACCESS);
3180
3181                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3182         }
3183 }
3184
3185 /* tp->lock is held. */
3186 static void tg3_disable_nvram_access(struct tg3 *tp)
3187 {
3188         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3189                 u32 nvaccess = tr32(NVRAM_ACCESS);
3190
3191                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3192         }
3193 }
3194
3195 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3196                                         u32 offset, u32 *val)
3197 {
3198         u32 tmp;
3199         int i;
3200
3201         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3202                 return -EINVAL;
3203
3204         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3205                                         EEPROM_ADDR_DEVID_MASK |
3206                                         EEPROM_ADDR_READ);
3207         tw32(GRC_EEPROM_ADDR,
3208              tmp |
3209              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3210              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3211               EEPROM_ADDR_ADDR_MASK) |
3212              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3213
3214         for (i = 0; i < 1000; i++) {
3215                 tmp = tr32(GRC_EEPROM_ADDR);
3216
3217                 if (tmp & EEPROM_ADDR_COMPLETE)
3218                         break;
3219                 msleep(1);
3220         }
3221         if (!(tmp & EEPROM_ADDR_COMPLETE))
3222                 return -EBUSY;
3223
3224         tmp = tr32(GRC_EEPROM_DATA);
3225
3226         /*
3227          * The data will always be opposite the native endian
3228          * format.  Perform a blind byteswap to compensate.
3229          */
3230         *val = swab32(tmp);
3231
3232         return 0;
3233 }
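
/* Illustrative sketch (not driver code): the "blind byteswap" used
 * above. swab32() unconditionally reverses the four bytes of a word,
 * regardless of host endianness; my_swab32() below reimplements it
 * for a standalone demo.
 */
#if 0   /* standalone example; compile and run separately */
#include <assert.h>
#include <stdint.h>

static uint32_t my_swab32(uint32_t x)
{
        return ((x & 0x000000ffu) << 24) |
               ((x & 0x0000ff00u) <<  8) |
               ((x & 0x00ff0000u) >>  8) |
               ((x & 0xff000000u) >> 24);
}

int main(void)
{
        assert(my_swab32(0x11223344u) == 0x44332211u);
        /* Swapping twice is the identity, so the matching blind swap
         * on the write side (tg3_nvram_write_block_using_eeprom()
         * below) undoes the swap performed here on read.
         */
        assert(my_swab32(my_swab32(0xdeadbeefu)) == 0xdeadbeefu);
        return 0;
}
#endif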
3234
3235 #define NVRAM_CMD_TIMEOUT 10000
3236
3237 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3238 {
3239         int i;
3240
3241         tw32(NVRAM_CMD, nvram_cmd);
3242         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3243                 usleep_range(10, 40);
3244                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3245                         udelay(10);
3246                         break;
3247                 }
3248         }
3249
3250         if (i == NVRAM_CMD_TIMEOUT)
3251                 return -EBUSY;
3252
3253         return 0;
3254 }
3255
3256 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3257 {
3258         if (tg3_flag(tp, NVRAM) &&
3259             tg3_flag(tp, NVRAM_BUFFERED) &&
3260             tg3_flag(tp, FLASH) &&
3261             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3262             (tp->nvram_jedecnum == JEDEC_ATMEL))
3263
3264                 addr = ((addr / tp->nvram_pagesize) <<
3265                         ATMEL_AT45DB0X1B_PAGE_POS) +
3266                        (addr % tp->nvram_pagesize);
3267
3268         return addr;
3269 }
3270
3271 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3272 {
3273         if (tg3_flag(tp, NVRAM) &&
3274             tg3_flag(tp, NVRAM_BUFFERED) &&
3275             tg3_flag(tp, FLASH) &&
3276             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3277             (tp->nvram_jedecnum == JEDEC_ATMEL))
3278
3279                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3280                         tp->nvram_pagesize) +
3281                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3282
3283         return addr;
3284 }
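
/* Illustrative sketch (not driver code): round-trip of the two
 * address translations above, assuming the 264-byte page size and the
 * 9-bit in-page offset field (ATMEL_AT45DB0X1B_PAGE_POS == 9 in
 * tg3.h) used by the Atmel AT45DB parts. Because 264 < 512, the
 * in-page offset always fits below the page-number field and the
 * mapping is exactly invertible.
 */
#if 0   /* standalone example; compile and run separately */
#include <assert.h>
#include <stdint.h>

#define PAGE_POS        9
#define PAGE_SIZE       264

static uint32_t to_phys(uint32_t addr)
{
        return ((addr / PAGE_SIZE) << PAGE_POS) + (addr % PAGE_SIZE);
}

static uint32_t to_logical(uint32_t addr)
{
        return ((addr >> PAGE_POS) * PAGE_SIZE) +
               (addr & ((1 << PAGE_POS) - 1));
}

int main(void)
{
        uint32_t a;

        for (a = 0; a < 8 * PAGE_SIZE; a += 4)
                assert(to_logical(to_phys(a)) == a);
        return 0;
}
#endif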
3285
3286 /* NOTE: Data read in from NVRAM is byteswapped according to
3287  * the byteswapping settings for all other register accesses.
3288  * tg3 devices are BE devices, so on a BE machine, the data
3289  * returned will be exactly as it is seen in NVRAM.  On a LE
3290  * machine, the 32-bit value will be byteswapped.
3291  */
3292 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3293 {
3294         int ret;
3295
3296         if (!tg3_flag(tp, NVRAM))
3297                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3298
3299         offset = tg3_nvram_phys_addr(tp, offset);
3300
3301         if (offset > NVRAM_ADDR_MSK)
3302                 return -EINVAL;
3303
3304         ret = tg3_nvram_lock(tp);
3305         if (ret)
3306                 return ret;
3307
3308         tg3_enable_nvram_access(tp);
3309
3310         tw32(NVRAM_ADDR, offset);
3311         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3312                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3313
3314         if (ret == 0)
3315                 *val = tr32(NVRAM_RDDATA);
3316
3317         tg3_disable_nvram_access(tp);
3318
3319         tg3_nvram_unlock(tp);
3320
3321         return ret;
3322 }
3323
3324 /* Ensures NVRAM data is in bytestream format. */
3325 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3326 {
3327         u32 v;
3328         int res = tg3_nvram_read(tp, offset, &v);
3329         if (!res)
3330                 *val = cpu_to_be32(v);
3331         return res;
3332 }
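
/* Illustrative sketch (not driver code): what "bytestream format"
 * means here. After cpu_to_be32() (htonl() in userspace), the bytes
 * in memory appear in the same order as they live in NVRAM, on both
 * little- and big-endian hosts.
 */
#if 0   /* standalone example; compile and run separately */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint32_t host = 0x11223344;     /* as returned by tg3_nvram_read() */
        uint32_t be = htonl(host);      /* cpu_to_be32() equivalent */
        unsigned char bytes[4];

        memcpy(bytes, &be, sizeof(be));
        /* Prints "11 22 33 44" regardless of host endianness. */
        printf("%02x %02x %02x %02x\n",
               bytes[0], bytes[1], bytes[2], bytes[3]);
        return 0;
}
#endif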
3333
3334 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3335                                     u32 offset, u32 len, u8 *buf)
3336 {
3337         int i, j, rc = 0;
3338         u32 val;
3339
3340         for (i = 0; i < len; i += 4) {
3341                 u32 addr;
3342                 __be32 data;
3343
3344                 addr = offset + i;
3345
3346                 memcpy(&data, buf + i, 4);
3347
3348                 /*
3349                  * The SEEPROM interface expects the data to always be opposite
3350                  * the native endian format.  We accomplish this by reversing
3351                  * all the operations that would have been performed on the
3352                  * data from a call to tg3_nvram_read_be32().
3353                  */
3354                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3355
3356                 val = tr32(GRC_EEPROM_ADDR);
3357                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3358
3359                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3360                         EEPROM_ADDR_READ);
3361                 tw32(GRC_EEPROM_ADDR, val |
3362                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3363                         (addr & EEPROM_ADDR_ADDR_MASK) |
3364                         EEPROM_ADDR_START |
3365                         EEPROM_ADDR_WRITE);
3366
3367                 for (j = 0; j < 1000; j++) {
3368                         val = tr32(GRC_EEPROM_ADDR);
3369
3370                         if (val & EEPROM_ADDR_COMPLETE)
3371                                 break;
3372                         msleep(1);
3373                 }
3374                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3375                         rc = -EBUSY;
3376                         break;
3377                 }
3378         }
3379
3380         return rc;
3381 }
3382
3383 /* offset and length are dword aligned */
3384 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3385                 u8 *buf)
3386 {
3387         int ret = 0;
3388         u32 pagesize = tp->nvram_pagesize;
3389         u32 pagemask = pagesize - 1;
3390         u32 nvram_cmd;
3391         u8 *tmp;
3392
3393         tmp = kmalloc(pagesize, GFP_KERNEL);
3394         if (tmp == NULL)
3395                 return -ENOMEM;
3396
3397         while (len) {
3398                 int j;
3399                 u32 phy_addr, page_off, size;
3400
3401                 phy_addr = offset & ~pagemask;
3402
3403                 for (j = 0; j < pagesize; j += 4) {
3404                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3405                                                   (__be32 *) (tmp + j));
3406                         if (ret)
3407                                 break;
3408                 }
3409                 if (ret)
3410                         break;
3411
3412                 page_off = offset & pagemask;
3413                 size = pagesize;
3414                 if (len < size)
3415                         size = len;
3416
3417                 len -= size;
3418
3419                 memcpy(tmp + page_off, buf, size);
3420
3421                 offset = offset + (pagesize - page_off);
3422
3423                 tg3_enable_nvram_access(tp);
3424
3425                 /*
3426                  * Before we can erase the flash page, we need
3427                  * to issue a special "write enable" command.
3428                  */
3429                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3430
3431                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3432                         break;
3433
3434                 /* Erase the target page */
3435                 tw32(NVRAM_ADDR, phy_addr);
3436
3437                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3438                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3439
3440                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3441                         break;
3442
3443                 /* Issue another write enable to start the write. */
3444                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3445
3446                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3447                         break;
3448
3449                 for (j = 0; j < pagesize; j += 4) {
3450                         __be32 data;
3451
3452                         data = *((__be32 *) (tmp + j));
3453
3454                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3455
3456                         tw32(NVRAM_ADDR, phy_addr + j);
3457
3458                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3459                                 NVRAM_CMD_WR;
3460
3461                         if (j == 0)
3462                                 nvram_cmd |= NVRAM_CMD_FIRST;
3463                         else if (j == (pagesize - 4))
3464                                 nvram_cmd |= NVRAM_CMD_LAST;
3465
3466                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3467                         if (ret)
3468                                 break;
3469                 }
3470                 if (ret)
3471                         break;
3472         }
3473
3474         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3475         tg3_nvram_exec_cmd(tp, nvram_cmd);
3476
3477         kfree(tmp);
3478
3479         return ret;
3480 }
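
/* Illustrative sketch (not driver code): the page-split arithmetic
 * from the read-modify-erase-rewrite loop above, with a hypothetical
 * power-of-two page size so the mask operations behave as intended.
 * A write that starts mid-page only advances to the next page
 * boundary on its first iteration; every later iteration is
 * page-aligned.
 */
#if 0   /* standalone example; compile and run separately */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pagesize = 256, pagemask = pagesize - 1;
        uint32_t offset = 300, len = 600;       /* hypothetical request */

        while (len) {
                uint32_t phy_addr = offset & ~pagemask; /* page base */
                uint32_t page_off = offset & pagemask;  /* start in page */
                uint32_t size = len < pagesize ? len : pagesize;

                printf("erase/rewrite page %u (data lands at +%u)\n",
                       phy_addr, page_off);
                len -= size;
                offset += pagesize - page_off;
        }
        return 0;
}
#endif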
3481
3482 /* offset and length are dword aligned */
3483 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3484                 u8 *buf)
3485 {
3486         int i, ret = 0;
3487
3488         for (i = 0; i < len; i += 4, offset += 4) {
3489                 u32 page_off, phy_addr, nvram_cmd;
3490                 __be32 data;
3491
3492                 memcpy(&data, buf + i, 4);
3493                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3494
3495                 page_off = offset % tp->nvram_pagesize;
3496
3497                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3498
3499                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3500
3501                 if (page_off == 0 || i == 0)
3502                         nvram_cmd |= NVRAM_CMD_FIRST;
3503                 if (page_off == (tp->nvram_pagesize - 4))
3504                         nvram_cmd |= NVRAM_CMD_LAST;
3505
3506                 if (i == (len - 4))
3507                         nvram_cmd |= NVRAM_CMD_LAST;
3508
3509                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3510                     !tg3_flag(tp, FLASH) ||
3511                     !tg3_flag(tp, 57765_PLUS))
3512                         tw32(NVRAM_ADDR, phy_addr);
3513
3514                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3515                     !tg3_flag(tp, 5755_PLUS) &&
3516                     (tp->nvram_jedecnum == JEDEC_ST) &&
3517                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3518                         u32 cmd;
3519
3520                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3521                         ret = tg3_nvram_exec_cmd(tp, cmd);
3522                         if (ret)
3523                                 break;
3524                 }
3525                 if (!tg3_flag(tp, FLASH)) {
3526                         /* We always do complete word writes to the EEPROM. */
3527                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3528                 }
3529
3530                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3531                 if (ret)
3532                         break;
3533         }
3534         return ret;
3535 }
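
/* Illustrative sketch (not driver code): the FIRST/LAST framing rule
 * used above. FIRST marks the first dword written (or the first dword
 * of a new page); LAST marks the final dword of a page or of the
 * whole transfer. The hypothetical 16-byte write below straddles a
 * page boundary, assuming the 264-byte page of the buffered Atmel
 * parts, so the burst is split into two framed runs.
 */
#if 0   /* standalone example; compile and run separately */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pagesize = 264, len = 16, offset = 256, i;

        for (i = 0; i < len; i += 4, offset += 4) {
                uint32_t page_off = offset % pagesize;
                int first = (page_off == 0 || i == 0);
                int last = (page_off == pagesize - 4) || (i == len - 4);

                printf("dword %u:%s%s\n", i / 4,
                       first ? " FIRST" : "", last ? " LAST" : "");
        }
        return 0;
}
#endif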
3536
3537 /* offset and length are dword aligned */
3538 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3539 {
3540         int ret;
3541
3542         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3543                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3544                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3545                 udelay(40);
3546         }
3547
3548         if (!tg3_flag(tp, NVRAM)) {
3549                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3550         } else {
3551                 u32 grc_mode;
3552
3553                 ret = tg3_nvram_lock(tp);
3554                 if (ret)
3555                         return ret;
3556
3557                 tg3_enable_nvram_access(tp);
3558                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3559                         tw32(NVRAM_WRITE1, 0x406);
3560
3561                 grc_mode = tr32(GRC_MODE);
3562                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3563
3564                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3565                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3566                                 buf);
3567                 } else {
3568                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3569                                 buf);
3570                 }
3571
3572                 grc_mode = tr32(GRC_MODE);
3573                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3574
3575                 tg3_disable_nvram_access(tp);
3576                 tg3_nvram_unlock(tp);
3577         }
3578
3579         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3580                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3581                 udelay(40);
3582         }
3583
3584         return ret;
3585 }
3586
3587 #define RX_CPU_SCRATCH_BASE     0x30000
3588 #define RX_CPU_SCRATCH_SIZE     0x04000
3589 #define TX_CPU_SCRATCH_BASE     0x34000
3590 #define TX_CPU_SCRATCH_SIZE     0x04000
3591
3592 /* tp->lock is held. */
3593 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3594 {
3595         int i;
3596         const int iters = 10000;
3597
3598         for (i = 0; i < iters; i++) {
3599                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3600                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3601                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3602                         break;
3603                 if (pci_channel_offline(tp->pdev))
3604                         return -EBUSY;
3605         }
3606
3607         return (i == iters) ? -EBUSY : 0;
3608 }
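
/* Illustrative sketch (not driver code): the general shape of the
 * bounded polling above -- retry a fixed number of times and
 * distinguish "halted", "gave up", and "device gone" (the
 * pci_channel_offline() check). The predicates here are stand-ins.
 */
#if 0   /* standalone example; compile and run separately */
#include <stdbool.h>
#include <stdio.h>

enum poll_rc { POLL_OK, POLL_GONE, POLL_TIMEOUT };

static enum poll_rc poll_until(bool (*done)(void), bool (*gone)(void),
                               int iters)
{
        int i;

        for (i = 0; i < iters; i++) {
                if (done())
                        return POLL_OK;
                if (gone())
                        return POLL_GONE;
        }
        return POLL_TIMEOUT;
}

static int countdown = 3;
static bool fake_halted(void) { return --countdown == 0; }
static bool fake_offline(void) { return false; }

int main(void)
{
        /* Succeeds on the third iteration; prints 0 (POLL_OK). */
        printf("%d\n", poll_until(fake_halted, fake_offline, 10000));
        return 0;
}
#endif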
3609
3610 /* tp->lock is held. */
3611 static int tg3_rxcpu_pause(struct tg3 *tp)
3612 {
3613         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3614
3615         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3616         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3617         udelay(10);
3618
3619         return rc;
3620 }
3621
3622 /* tp->lock is held. */
3623 static int tg3_txcpu_pause(struct tg3 *tp)
3624 {
3625         return tg3_pause_cpu(tp, TX_CPU_BASE);
3626 }
3627
3628 /* tp->lock is held. */
3629 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3630 {
3631         tw32(cpu_base + CPU_STATE, 0xffffffff);
3632         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3633 }
3634
3635 /* tp->lock is held. */
3636 static void tg3_rxcpu_resume(struct tg3 *tp)
3637 {
3638         tg3_resume_cpu(tp, RX_CPU_BASE);
3639 }
3640
3641 /* tp->lock is held. */
3642 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3643 {
3644         int rc;
3645
3646         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3647
3648         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3649                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3650
3651                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3652                 return 0;
3653         }
3654         if (cpu_base == RX_CPU_BASE) {
3655                 rc = tg3_rxcpu_pause(tp);
3656         } else {
3657                 /*
3658                  * There is only an Rx CPU for the 5750 derivative in the
3659                  * BCM4785.
3660                  */
3661                 if (tg3_flag(tp, IS_SSB_CORE))
3662                         return 0;
3663
3664                 rc = tg3_txcpu_pause(tp);
3665         }
3666
3667         if (rc) {
3668                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3669                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3670                 return -ENODEV;
3671         }
3672
3673         /* Clear firmware's nvram arbitration. */
3674         if (tg3_flag(tp, NVRAM))
3675                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3676         return 0;
3677 }
3678
3679 static int tg3_fw_data_len(struct tg3 *tp,
3680                            const struct tg3_firmware_hdr *fw_hdr)
3681 {
3682         int fw_len;
3683
3684         /* Non-fragmented firmware has one firmware header followed by a
3685          * contiguous chunk of data to be written. The length field in that
3686          * header is not the length of the data to be written but the
3687          * complete length of the bss. The data length is determined from
3688          * tp->fw->size minus the headers.
3689          *
3690          * Fragmented firmware has a main header followed by multiple
3691          * fragments. Each fragment is identical to non-fragmented firmware:
3692          * a firmware header followed by a contiguous chunk of data. In
3693          * the main header, the length field is unused and set to 0xffffffff.
3694          * In each fragment header, the length is the entire size of that
3695          * fragment, i.e. fragment data + header length. The data length is
3696          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3697          */
3698         if (tp->fw_len == 0xffffffff)
3699                 fw_len = be32_to_cpu(fw_hdr->len);
3700         else
3701                 fw_len = tp->fw->size;
3702
3703         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3704 }
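
/* Illustrative sketch (not driver code): walking a fragmented image
 * the way tg3_load_firmware_cpu() below does. Assumes the 12-byte
 * header (version, base_addr, len -- all big-endian) described by
 * struct tg3_firmware_hdr; the blob contents here are hypothetical.
 */
#if 0   /* standalone example; compile and run separately */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fw_hdr {                 /* mirrors struct tg3_firmware_hdr */
        uint32_t version;
        uint32_t base_addr;
        uint32_t len;           /* big-endian: header + data bytes */
};
#define FW_HDR_LEN sizeof(struct fw_hdr)

static void put_len(uint8_t *p, uint32_t len)
{
        uint32_t be = htonl(len);

        memcpy(p + 8, &be, 4);  /* len is the third header word */
}

int main(void)
{
        /* Main header, then fragments carrying 8 and 4 data bytes. */
        uint8_t blob[FW_HDR_LEN + (FW_HDR_LEN + 8) + (FW_HDR_LEN + 4)] = {0};
        int total = sizeof(blob) - FW_HDR_LEN;  /* skip main header */
        uint8_t *frag = blob + FW_HDR_LEN;

        put_len(blob, 0xffffffff);              /* main header: len unused */
        put_len(frag, FW_HDR_LEN + 8);
        put_len(frag + FW_HDR_LEN + 8, FW_HDR_LEN + 4);

        while (total > 0) {
                uint32_t flen;

                memcpy(&flen, frag + 8, 4);
                flen = ntohl(flen);
                printf("fragment: %u data words\n",
                       (unsigned)(flen - FW_HDR_LEN) / 4);
                total -= flen;
                frag += flen;                   /* advance to next fragment */
        }
        return 0;
}
#endif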
3705
3706 /* tp->lock is held. */
3707 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3708                                  u32 cpu_scratch_base, int cpu_scratch_size,
3709                                  const struct tg3_firmware_hdr *fw_hdr)
3710 {
3711         int err, i;
3712         void (*write_op)(struct tg3 *, u32, u32);
3713         int total_len = tp->fw->size;
3714
3715         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3716                 netdev_err(tp->dev,
3717                            "%s: Trying to load TX CPU firmware on a 5705-class device\n",
3718                            __func__);
3719                 return -EINVAL;
3720         }
3721
3722         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3723                 write_op = tg3_write_mem;
3724         else
3725                 write_op = tg3_write_indirect_reg32;
3726
3727         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3728                 /* It is possible that bootcode is still loading at this point.
3729                  * Get the nvram lock before halting the cpu.
3730                  */
3731                 int lock_err = tg3_nvram_lock(tp);
3732                 err = tg3_halt_cpu(tp, cpu_base);
3733                 if (!lock_err)
3734                         tg3_nvram_unlock(tp);
3735                 if (err)
3736                         goto out;
3737
3738                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3739                         write_op(tp, cpu_scratch_base + i, 0);
3740                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3741                 tw32(cpu_base + CPU_MODE,
3742                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3743         } else {
3744                 /* Subtract the additional main header for fragmented firmware
3745                  * and advance to the first fragment.
3746                  */
3747                 total_len -= TG3_FW_HDR_LEN;
3748                 fw_hdr++;
3749         }
3750
3751         do {
3752                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3753                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3754                         write_op(tp, cpu_scratch_base +
3755                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3756                                      (i * sizeof(u32)),
3757                                  be32_to_cpu(fw_data[i]));
3758
3759                 total_len -= be32_to_cpu(fw_hdr->len);
3760
3761                 /* Advance to next fragment */
3762                 fw_hdr = (struct tg3_firmware_hdr *)
3763                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3764         } while (total_len > 0);
3765
3766         err = 0;
3767
3768 out:
3769         return err;
3770 }
3771
3772 /* tp->lock is held. */
3773 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3774 {
3775         int i;
3776         const int iters = 5;
3777
3778         tw32(cpu_base + CPU_STATE, 0xffffffff);
3779         tw32_f(cpu_base + CPU_PC, pc);
3780
3781         for (i = 0; i < iters; i++) {
3782                 if (tr32(cpu_base + CPU_PC) == pc)
3783                         break;
3784                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3785                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3786                 tw32_f(cpu_base + CPU_PC, pc);
3787                 udelay(1000);
3788         }
3789
3790         return (i == iters) ? -EBUSY : 0;
3791 }
3792
3793 /* tp->lock is held. */
3794 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3795 {
3796         const struct tg3_firmware_hdr *fw_hdr;
3797         int err;
3798
3799         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3800
3801         /* Firmware blob starts with version numbers, followed by
3802          * start address and length. We are setting complete length:
3803          * length = end_address_of_bss - start_address_of_text.
3804          * The remainder is the blob to be loaded contiguously
3805          * from the start address. */
3806
3807         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3808                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3809                                     fw_hdr);
3810         if (err)
3811                 return err;
3812
3813         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3814                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3815                                     fw_hdr);
3816         if (err)
3817                 return err;
3818
3819         /* Now start up only the RX cpu. */
3820         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3821                                        be32_to_cpu(fw_hdr->base_addr));
3822         if (err) {
3823                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3824                            "should be %08x\n", __func__,
3825                            tr32(RX_CPU_BASE + CPU_PC),
3826                                 be32_to_cpu(fw_hdr->base_addr));
3827                 return -ENODEV;
3828         }
3829
3830         tg3_rxcpu_resume(tp);
3831
3832         return 0;
3833 }
3834
3835 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3836 {
3837         const int iters = 1000;
3838         int i;
3839         u32 val;
3840
3841         /* Wait for boot code to complete initialization and enter the
3842          * service loop. It is then safe to download service patches.
3843          */
3844         for (i = 0; i < iters; i++) {
3845                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3846                         break;
3847
3848                 udelay(10);
3849         }
3850
3851         if (i == iters) {
3852                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3853                 return -EBUSY;
3854         }
3855
3856         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3857         if (val & 0xff) {
3858                 netdev_warn(tp->dev,
3859                             "Other patches exist. Not downloading EEE patch\n");
3860                 return -EEXIST;
3861         }
3862
3863         return 0;
3864 }
3865
3866 /* tp->lock is held. */
3867 static void tg3_load_57766_firmware(struct tg3 *tp)
3868 {
3869         struct tg3_firmware_hdr *fw_hdr;
3870
3871         if (!tg3_flag(tp, NO_NVRAM))
3872                 return;
3873
3874         if (tg3_validate_rxcpu_state(tp))
3875                 return;
3876
3877         if (!tp->fw)
3878                 return;
3879
3880         /* This firmware blob has a different format than older firmware
3881          * releases as given below. The main difference is that we have
3882          * fragmented data to be written to non-contiguous locations.
3883          *
3884          * In the beginning we have a firmware header identical to other
3885          * firmware, which consists of version, base addr and length. The
3886          * length here is unused and set to 0xffffffff.
3887          *
3888          * This is followed by a series of firmware fragments which are
3889          * individually identical to previous firmware, i.e. they have the
3890          * firmware header followed by data for that fragment. The version
3891          * field of the individual fragment header is unused.
3892          */
3893
3894         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3895         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3896                 return;
3897
3898         if (tg3_rxcpu_pause(tp))
3899                 return;
3900
3901         /* tg3_load_firmware_cpu() will always succeed for the 57766. */
3902         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3903
3904         tg3_rxcpu_resume(tp);
3905 }
3906
3907 /* tp->lock is held. */
3908 static int tg3_load_tso_firmware(struct tg3 *tp)
3909 {
3910         const struct tg3_firmware_hdr *fw_hdr;
3911         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3912         int err;
3913
3914         if (!tg3_flag(tp, FW_TSO))
3915                 return 0;
3916
3917         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3918
3919         /* Firmware blob starts with version numbers, followed by
3920          * start address and length. We are setting complete length:
3921          * length = end_address_of_bss - start_address_of_text.
3922          * The remainder is the blob to be loaded contiguously
3923          * from the start address. */
3924
3925         cpu_scratch_size = tp->fw_len;
3926
3927         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3928                 cpu_base = RX_CPU_BASE;
3929                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3930         } else {
3931                 cpu_base = TX_CPU_BASE;
3932                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3933                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3934         }
3935
3936         err = tg3_load_firmware_cpu(tp, cpu_base,
3937                                     cpu_scratch_base, cpu_scratch_size,
3938                                     fw_hdr);
3939         if (err)
3940                 return err;
3941
3942         /* Now start up the cpu. */
3943         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3944                                        be32_to_cpu(fw_hdr->base_addr));
3945         if (err) {
3946                 netdev_err(tp->dev,
3947                            "%s fails to set CPU PC, is %08x should be %08x\n",
3948                            __func__, tr32(cpu_base + CPU_PC),
3949                            be32_to_cpu(fw_hdr->base_addr));
3950                 return -ENODEV;
3951         }
3952
3953         tg3_resume_cpu(tp, cpu_base);
3954         return 0;
3955 }
3956
3957 /* tp->lock is held. */
3958 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3959 {
3960         u32 addr_high, addr_low;
3961
3962         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3963         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3964                     (mac_addr[4] <<  8) | mac_addr[5]);
3965
3966         if (index < 4) {
3967                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3968                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3969         } else {
3970                 index -= 4;
3971                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3972                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3973         }
3974 }
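
/* Illustrative sketch (not driver code): the packing above splits a
 * 6-byte MAC address into a 16-bit "high" register word (bytes 0-1)
 * and a 32-bit "low" word (bytes 2-5). Hypothetical address below.
 */
#if 0   /* standalone example; compile and run separately */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
        uint32_t addr_high = (mac[0] << 8) | mac[1];
        uint32_t addr_low = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
                            (mac[4] << 8) | mac[5];

        assert(addr_high == 0x0010);
        assert(addr_low == 0x18aabbcc);
        return 0;
}
#endif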
3975
3976 /* tp->lock is held. */
3977 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3978 {
3979         u32 addr_high;
3980         int i;
3981
3982         for (i = 0; i < 4; i++) {
3983                 if (i == 1 && skip_mac_1)
3984                         continue;
3985                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3986         }
3987
3988         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3989             tg3_asic_rev(tp) == ASIC_REV_5704) {
3990                 for (i = 4; i < 16; i++)
3991                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3992         }
3993
3994         addr_high = (tp->dev->dev_addr[0] +
3995                      tp->dev->dev_addr[1] +
3996                      tp->dev->dev_addr[2] +
3997                      tp->dev->dev_addr[3] +
3998                      tp->dev->dev_addr[4] +
3999                      tp->dev->dev_addr[5]) &
4000                 TX_BACKOFF_SEED_MASK;
4001         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4002 }
4003
4004 static void tg3_enable_register_access(struct tg3 *tp)
4005 {
4006         /*
4007          * Make sure register accesses (indirect or otherwise) will function
4008          * correctly.
4009          */
4010         pci_write_config_dword(tp->pdev,
4011                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4012 }
4013
4014 static int tg3_power_up(struct tg3 *tp)
4015 {
4016         int err;
4017
4018         tg3_enable_register_access(tp);
4019
4020         err = pci_set_power_state(tp->pdev, PCI_D0);
4021         if (!err) {
4022                 /* Switch out of Vaux if it is a NIC */
4023                 tg3_pwrsrc_switch_to_vmain(tp);
4024         } else {
4025                 netdev_err(tp->dev, "Transition to D0 failed\n");
4026         }
4027
4028         return err;
4029 }
4030
4031 static int tg3_setup_phy(struct tg3 *, bool);
4032
4033 static int tg3_power_down_prepare(struct tg3 *tp)
4034 {
4035         u32 misc_host_ctrl;
4036         bool device_should_wake, do_low_power;
4037
4038         tg3_enable_register_access(tp);
4039
4040         /* Restore the CLKREQ setting. */
4041         if (tg3_flag(tp, CLKREQ_BUG))
4042                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4043                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4044
4045         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4046         tw32(TG3PCI_MISC_HOST_CTRL,
4047              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4048
4049         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4050                              tg3_flag(tp, WOL_ENABLE);
4051
4052         if (tg3_flag(tp, USE_PHYLIB)) {
4053                 do_low_power = false;
4054                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4055                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4056                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4057                         struct phy_device *phydev;
4058                         u32 phyid;
4059
4060                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4061
4062                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4063
4064                         tp->link_config.speed = phydev->speed;
4065                         tp->link_config.duplex = phydev->duplex;
4066                         tp->link_config.autoneg = phydev->autoneg;
4067                         ethtool_convert_link_mode_to_legacy_u32(
4068                                 &tp->link_config.advertising,
4069                                 phydev->advertising);
4070
4071                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4072                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4073                                          advertising);
4074                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4075                                          advertising);
4076                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4077                                          advertising);
4078
4079                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4080                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4081                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4082                                                          advertising);
4083                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4084                                                          advertising);
4085                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4086                                                          advertising);
4087                                 } else {
4088                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4089                                                          advertising);
4090                                 }
4091                         }
4092
4093                         linkmode_copy(phydev->advertising, advertising);
4094                         phy_start_aneg(phydev);
4095
4096                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4097                         if (phyid != PHY_ID_BCMAC131) {
4098                                 phyid &= PHY_BCM_OUI_MASK;
4099                                 if (phyid == PHY_BCM_OUI_1 ||
4100                                     phyid == PHY_BCM_OUI_2 ||
4101                                     phyid == PHY_BCM_OUI_3)
4102                                         do_low_power = true;
4103                         }
4104                 }
4105         } else {
4106                 do_low_power = true;
4107
4108                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4109                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4110
4111                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4112                         tg3_setup_phy(tp, false);
4113         }
4114
4115         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4116                 u32 val;
4117
4118                 val = tr32(GRC_VCPU_EXT_CTRL);
4119                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4120         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4121                 int i;
4122                 u32 val;
4123
4124                 for (i = 0; i < 200; i++) {
4125                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4126                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4127                                 break;
4128                         msleep(1);
4129                 }
4130         }
4131         if (tg3_flag(tp, WOL_CAP))
4132                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4133                                                      WOL_DRV_STATE_SHUTDOWN |
4134                                                      WOL_DRV_WOL |
4135                                                      WOL_SET_MAGIC_PKT);
4136
4137         if (device_should_wake) {
4138                 u32 mac_mode;
4139
4140                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4141                         if (do_low_power &&
4142                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4143                                 tg3_phy_auxctl_write(tp,
4144                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4145                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4146                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4147                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4148                                 udelay(40);
4149                         }
4150
4151                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4152                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4153                         else if (tp->phy_flags &
4154                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4155                                 if (tp->link_config.active_speed == SPEED_1000)
4156                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4157                                 else
4158                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4159                         } else
4160                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4161
4162                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4163                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4164                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4165                                              SPEED_100 : SPEED_10;
4166                                 if (tg3_5700_link_polarity(tp, speed))
4167                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4168                                 else
4169                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4170                         }
4171                 } else {
4172                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4173                 }
4174
4175                 if (!tg3_flag(tp, 5750_PLUS))
4176                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4177
4178                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4179                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4180                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4181                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4182
4183                 if (tg3_flag(tp, ENABLE_APE))
4184                         mac_mode |= MAC_MODE_APE_TX_EN |
4185                                     MAC_MODE_APE_RX_EN |
4186                                     MAC_MODE_TDE_ENABLE;
4187
4188                 tw32_f(MAC_MODE, mac_mode);
4189                 udelay(100);
4190
4191                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4192                 udelay(10);
4193         }
4194
4195         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4196             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4197              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4198                 u32 base_val;
4199
4200                 base_val = tp->pci_clock_ctrl;
4201                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4202                              CLOCK_CTRL_TXCLK_DISABLE);
4203
4204                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4205                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4206         } else if (tg3_flag(tp, 5780_CLASS) ||
4207                    tg3_flag(tp, CPMU_PRESENT) ||
4208                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4209                 /* do nothing */
4210         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4211                 u32 newbits1, newbits2;
4212
4213                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4214                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4215                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4216                                     CLOCK_CTRL_TXCLK_DISABLE |
4217                                     CLOCK_CTRL_ALTCLK);
4218                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4219                 } else if (tg3_flag(tp, 5705_PLUS)) {
4220                         newbits1 = CLOCK_CTRL_625_CORE;
4221                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4222                 } else {
4223                         newbits1 = CLOCK_CTRL_ALTCLK;
4224                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4225                 }
4226
4227                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4228                             40);
4229
4230                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4231                             40);
4232
4233                 if (!tg3_flag(tp, 5705_PLUS)) {
4234                         u32 newbits3;
4235
4236                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4237                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4238                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4239                                             CLOCK_CTRL_TXCLK_DISABLE |
4240                                             CLOCK_CTRL_44MHZ_CORE);
4241                         } else {
4242                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4243                         }
4244
4245                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4246                                     tp->pci_clock_ctrl | newbits3, 40);
4247                 }
4248         }
4249
4250         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4251                 tg3_power_down_phy(tp, do_low_power);
4252
4253         tg3_frob_aux_power(tp, true);
4254
4255         /* Workaround for unstable PLL clock */
4256         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4257             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4258              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4259                 u32 val = tr32(0x7d00);
4260
4261                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4262                 tw32(0x7d00, val);
4263                 if (!tg3_flag(tp, ENABLE_ASF)) {
4264                         int err;
4265
4266                         err = tg3_nvram_lock(tp);
4267                         tg3_halt_cpu(tp, RX_CPU_BASE);
4268                         if (!err)
4269                                 tg3_nvram_unlock(tp);
4270                 }
4271         }
4272
4273         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4274
4275         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4276
4277         return 0;
4278 }
4279
4280 static void tg3_power_down(struct tg3 *tp)
4281 {
4282         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4283         pci_set_power_state(tp->pdev, PCI_D3hot);
4284 }
4285
4286 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4287 {
4288         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4289         case MII_TG3_AUX_STAT_10HALF:
4290                 *speed = SPEED_10;
4291                 *duplex = DUPLEX_HALF;
4292                 break;
4293
4294         case MII_TG3_AUX_STAT_10FULL:
4295                 *speed = SPEED_10;
4296                 *duplex = DUPLEX_FULL;
4297                 break;
4298
4299         case MII_TG3_AUX_STAT_100HALF:
4300                 *speed = SPEED_100;
4301                 *duplex = DUPLEX_HALF;
4302                 break;
4303
4304         case MII_TG3_AUX_STAT_100FULL:
4305                 *speed = SPEED_100;
4306                 *duplex = DUPLEX_FULL;
4307                 break;
4308
4309         case MII_TG3_AUX_STAT_1000HALF:
4310                 *speed = SPEED_1000;
4311                 *duplex = DUPLEX_HALF;
4312                 break;
4313
4314         case MII_TG3_AUX_STAT_1000FULL:
4315                 *speed = SPEED_1000;
4316                 *duplex = DUPLEX_FULL;
4317                 break;
4318
4319         default:
4320                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4321                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4322                                  SPEED_10;
4323                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4324                                   DUPLEX_HALF;
4325                         break;
4326                 }
4327                 *speed = SPEED_UNKNOWN;
4328                 *duplex = DUPLEX_UNKNOWN;
4329                 break;
4330         }
4331 }
4332
4333 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4334 {
4335         int err = 0;
4336         u32 val, new_adv;
4337
4338         new_adv = ADVERTISE_CSMA;
4339         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4340         new_adv |= mii_advertise_flowctrl(flowctrl);
4341
4342         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4343         if (err)
4344                 goto done;
4345
4346         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4347                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4348
4349                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4350                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4351                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4352
4353                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4354                 if (err)
4355                         goto done;
4356         }
4357
4358         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4359                 goto done;
4360
4361         tw32(TG3_CPMU_EEE_MODE,
4362              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4363
4364         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4365         if (!err) {
4366                 u32 err2;
4367
4368                 val = 0;
4369                 /* Advertise 100BASE-TX EEE ability */
4370                 if (advertise & ADVERTISED_100baseT_Full)
4371                         val |= MDIO_AN_EEE_ADV_100TX;
4372                 /* Advertise 1000BASE-T EEE ability */
4373                 if (advertise & ADVERTISED_1000baseT_Full)
4374                         val |= MDIO_AN_EEE_ADV_1000T;
4375
4376                 if (!tp->eee.eee_enabled) {
4377                         val = 0;
4378                         tp->eee.advertised = 0;
4379                 } else {
4380                         tp->eee.advertised = advertise &
4381                                              (ADVERTISED_100baseT_Full |
4382                                               ADVERTISED_1000baseT_Full);
4383                 }
4384
4385                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4386                 if (err)
4387                         val = 0;
4388
4389                 switch (tg3_asic_rev(tp)) {
4390                 case ASIC_REV_5717:
4391                 case ASIC_REV_57765:
4392                 case ASIC_REV_57766:
4393                 case ASIC_REV_5719:
4394                         /* If we advertised any EEE abilities above... */
4395                         if (val)
4396                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4397                                       MII_TG3_DSP_TAP26_RMRXSTO |
4398                                       MII_TG3_DSP_TAP26_OPCSINPT;
4399                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4400                         /* Fall through */
4401                 case ASIC_REV_5720:
4402                 case ASIC_REV_5762:
4403                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4404                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4405                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4406                 }
4407
4408                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4409                 if (!err)
4410                         err = err2;
4411         }
4412
4413 done:
4414         return err;
4415 }
4416
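/* Begin bringing up a copper link.  With autoneg enabled (or in low-power
 * mode) this programs the advertisement and restarts autonegotiation;
 * otherwise it forces the configured speed/duplex through MII_BMCR,
 * bouncing the PHY through loopback first so the old link drops cleanly.
 */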
4417 static void tg3_phy_copper_begin(struct tg3 *tp)
4418 {
4419         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4420             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4421                 u32 adv, fc;
4422
4423                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4424                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4425                         adv = ADVERTISED_10baseT_Half |
4426                               ADVERTISED_10baseT_Full;
4427                         if (tg3_flag(tp, WOL_SPEED_100MB))
4428                                 adv |= ADVERTISED_100baseT_Half |
4429                                        ADVERTISED_100baseT_Full;
4430                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4431                                 if (!(tp->phy_flags &
4432                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4433                                         adv |= ADVERTISED_1000baseT_Half;
4434                                 adv |= ADVERTISED_1000baseT_Full;
4435                         }
4436
4437                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4438                 } else {
4439                         adv = tp->link_config.advertising;
4440                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4441                                 adv &= ~(ADVERTISED_1000baseT_Half |
4442                                          ADVERTISED_1000baseT_Full);
4443
4444                         fc = tp->link_config.flowctrl;
4445                 }
4446
4447                 tg3_phy_autoneg_cfg(tp, adv, fc);
4448
4449                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4450                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4451                         /* Normally during power down we want to autonegotiate
4452                          * the lowest possible speed for WOL. However, to avoid
4453                          * link flap, we leave it untouched.
4454                          */
4455                         return;
4456                 }
4457
4458                 tg3_writephy(tp, MII_BMCR,
4459                              BMCR_ANENABLE | BMCR_ANRESTART);
4460         } else {
4461                 int i;
4462                 u32 bmcr, orig_bmcr;
4463
4464                 tp->link_config.active_speed = tp->link_config.speed;
4465                 tp->link_config.active_duplex = tp->link_config.duplex;
4466
4467                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4468                         /* With autoneg disabled, 5715 only links up when the
4469                          * advertisement register has the configured speed
4470                          * enabled.
4471                          */
4472                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4473                 }
4474
4475                 bmcr = 0;
4476                 switch (tp->link_config.speed) {
4477                 default:
4478                 case SPEED_10:
4479                         break;
4480
4481                 case SPEED_100:
4482                         bmcr |= BMCR_SPEED100;
4483                         break;
4484
4485                 case SPEED_1000:
4486                         bmcr |= BMCR_SPEED1000;
4487                         break;
4488                 }
4489
4490                 if (tp->link_config.duplex == DUPLEX_FULL)
4491                         bmcr |= BMCR_FULLDPLX;
4492
4493                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4494                     (bmcr != orig_bmcr)) {
4495                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4496                         for (i = 0; i < 1500; i++) {
4497                                 u32 tmp;
4498
4499                                 udelay(10);
4500                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4501                                     tg3_readphy(tp, MII_BMSR, &tmp))
4502                                         continue;
4503                                 if (!(tmp & BMSR_LSTATUS)) {
4504                                         udelay(40);
4505                                         break;
4506                                 }
4507                         }
4508                         tg3_writephy(tp, MII_BMCR, bmcr);
4509                         udelay(40);
4510                 }
4511         }
4512 }
4513
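/* Rebuild tp->link_config from the live PHY registers so the driver's
 * notion of autoneg, speed, duplex, advertisement and flow control
 * matches whatever the PHY is currently programmed with.
 */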
4514 static int tg3_phy_pull_config(struct tg3 *tp)
4515 {
4516         int err;
4517         u32 val;
4518
4519         err = tg3_readphy(tp, MII_BMCR, &val);
4520         if (err)
4521                 goto done;
4522
4523         if (!(val & BMCR_ANENABLE)) {
4524                 tp->link_config.autoneg = AUTONEG_DISABLE;
4525                 tp->link_config.advertising = 0;
4526                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4527
4528                 err = -EIO;
4529
4530                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4531                 case 0:
4532                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4533                                 goto done;
4534
4535                         tp->link_config.speed = SPEED_10;
4536                         break;
4537                 case BMCR_SPEED100:
4538                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4539                                 goto done;
4540
4541                         tp->link_config.speed = SPEED_100;
4542                         break;
4543                 case BMCR_SPEED1000:
4544                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4545                                 tp->link_config.speed = SPEED_1000;
4546                                 break;
4547                         }
4548                         /* Fall through */
4549                 default:
4550                         goto done;
4551                 }
4552
4553                 if (val & BMCR_FULLDPLX)
4554                         tp->link_config.duplex = DUPLEX_FULL;
4555                 else
4556                         tp->link_config.duplex = DUPLEX_HALF;
4557
4558                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4559
4560                 err = 0;
4561                 goto done;
4562         }
4563
4564         tp->link_config.autoneg = AUTONEG_ENABLE;
4565         tp->link_config.advertising = ADVERTISED_Autoneg;
4566         tg3_flag_set(tp, PAUSE_AUTONEG);
4567
4568         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4569                 u32 adv;
4570
4571                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4572                 if (err)
4573                         goto done;
4574
4575                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4576                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4577
4578                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4579         } else {
4580                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4581         }
4582
4583         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4584                 u32 adv;
4585
4586                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4587                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4588                         if (err)
4589                                 goto done;
4590
4591                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4592                 } else {
4593                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4594                         if (err)
4595                                 goto done;
4596
4597                         adv = tg3_decode_flowctrl_1000X(val);
4598                         tp->link_config.flowctrl = adv;
4599
4600                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4601                         adv = mii_adv_to_ethtool_adv_x(val);
4602                 }
4603
4604                 tp->link_config.advertising |= adv;
4605         }
4606
4607 done:
4608         return err;
4609 }
4610
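/* DSP initialization sequence for the BCM5401 PHY.  The register/value
 * pairs below are opaque vendor magic.
 */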
4611 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4612 {
4613         int err;
4614
4615         /* Turn off tap power management. */
4616         /* Set the extended packet length bit. */
4617         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4618
4619         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4620         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4621         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4622         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4623         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4624
4625         udelay(40);
4626
4627         return err;
4628 }
4629
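/* Return true when the EEE configuration in hardware matches the
 * requested state in tp->eee, or when the PHY has no EEE support and
 * there is nothing to check.
 */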
4630 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4631 {
4632         struct ethtool_eee eee;
4633
4634         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4635                 return true;
4636
4637         tg3_eee_pull_config(tp, &eee);
4638
4639         if (tp->eee.eee_enabled) {
4640                 if (tp->eee.advertised != eee.advertised ||
4641                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4642                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4643                         return false;
4644         } else {
4645                 /* EEE is disabled but we're advertising */
4646                 if (eee.advertised)
4647                         return false;
4648         }
4649
4650         return true;
4651 }
4652
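/* Verify that the PHY's advertisement registers still match the
 * requested configuration.  On success *lcladv holds the current
 * MII_ADVERTISE value for the later flow-control resolution.
 */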
4653 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4654 {
4655         u32 advmsk, tgtadv, advertising;
4656
4657         advertising = tp->link_config.advertising;
4658         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4659
4660         advmsk = ADVERTISE_ALL;
4661         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4662                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4663                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4664         }
4665
4666         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4667                 return false;
4668
4669         if ((*lcladv & advmsk) != tgtadv)
4670                 return false;
4671
4672         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4673                 u32 tg3_ctrl;
4674
4675                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4676
4677                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4678                         return false;
4679
4680                 if (tgtadv &&
4681                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4682                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4683                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4684                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4685                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4686                 } else {
4687                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4688                 }
4689
4690                 if (tg3_ctrl != tgtadv)
4691                         return false;
4692         }
4693
4694         return true;
4695 }
4696
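/* Fetch the link partner's abilities from MII_STAT1000 and MII_LPA and
 * record them in tp->link_config.rmt_adv.
 */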
4697 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4698 {
4699         u32 lpeth = 0;
4700
4701         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4702                 u32 val;
4703
4704                 if (tg3_readphy(tp, MII_STAT1000, &val))
4705                         return false;
4706
4707                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4708         }
4709
4710         if (tg3_readphy(tp, MII_LPA, rmtadv))
4711                 return false;
4712
4713         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4714         tp->link_config.rmt_adv = lpeth;
4715
4716         return true;
4717 }
4718
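/* Propagate a link state change to the netdev carrier state and log it.
 * Returns true if the link state actually changed.
 */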
4719 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4720 {
4721         if (curr_link_up != tp->link_up) {
4722                 if (curr_link_up) {
4723                         netif_carrier_on(tp->dev);
4724                 } else {
4725                         netif_carrier_off(tp->dev);
4726                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4727                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4728                 }
4729
4730                 tg3_link_report(tp);
4731                 return true;
4732         }
4733
4734         return false;
4735 }
4736
4737 static void tg3_clear_mac_status(struct tg3 *tp)
4738 {
4739         tw32(MAC_EVENT, 0);
4740
4741         tw32_f(MAC_STATUS,
4742                MAC_STATUS_SYNC_CHANGED |
4743                MAC_STATUS_CFG_CHANGED |
4744                MAC_STATUS_MI_COMPLETION |
4745                MAC_STATUS_LNKSTATE_CHANGED);
4746         udelay(40);
4747 }
4748
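/* Program the CPMU EEE link-idle, exit-timer and mode registers from
 * tp->eee.  The mode word is written only when EEE is enabled;
 * otherwise the register is cleared.
 */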
4749 static void tg3_setup_eee(struct tg3 *tp)
4750 {
4751         u32 val;
4752
4753         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4754               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4755         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4756                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4757
4758         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4759
4760         tw32_f(TG3_CPMU_EEE_CTRL,
4761                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4762
4763         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4764               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4765               TG3_CPMU_EEEMD_LPI_IN_RX |
4766               TG3_CPMU_EEEMD_EEE_ENABLE;
4767
4768         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4769                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4770
4771         if (tg3_flag(tp, ENABLE_APE))
4772                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4773
4774         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4775
4776         tw32_f(TG3_CPMU_EEE_DBTMR1,
4777                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4778                (tp->eee.tx_lpi_timer & 0xffff));
4779
4780         tw32_f(TG3_CPMU_EEE_DBTMR2,
4781                TG3_CPMU_DBTMR2_APE_TX_2047US |
4782                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4783 }
4784
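/* Main link setup path for copper PHYs: optionally reset the PHY, poll
 * MII_BMSR for link, decode speed/duplex from the AUX status register,
 * sanity-check the autoneg results, then program MAC_MODE, the LEDs and
 * flow control to match.
 */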
4785 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4786 {
4787         bool current_link_up;
4788         u32 bmsr, val;
4789         u32 lcl_adv, rmt_adv;
4790         u32 current_speed;
4791         u8 current_duplex;
4792         int i, err;
4793
4794         tg3_clear_mac_status(tp);
4795
4796         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4797                 tw32_f(MAC_MI_MODE,
4798                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4799                 udelay(80);
4800         }
4801
4802         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4803
4804         /* Some third-party PHYs need to be reset on link going
4805          * down.
4806          */
4807         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4808              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4809              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4810             tp->link_up) {
4811                 tg3_readphy(tp, MII_BMSR, &bmsr);
4812                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4813                     !(bmsr & BMSR_LSTATUS))
4814                         force_reset = true;
4815         }
4816         if (force_reset)
4817                 tg3_phy_reset(tp);
4818
4819         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4820                 tg3_readphy(tp, MII_BMSR, &bmsr);
4821                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4822                     !tg3_flag(tp, INIT_COMPLETE))
4823                         bmsr = 0;
4824
4825                 if (!(bmsr & BMSR_LSTATUS)) {
4826                         err = tg3_init_5401phy_dsp(tp);
4827                         if (err)
4828                                 return err;
4829
4830                         tg3_readphy(tp, MII_BMSR, &bmsr);
4831                         for (i = 0; i < 1000; i++) {
4832                                 udelay(10);
4833                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4834                                     (bmsr & BMSR_LSTATUS)) {
4835                                         udelay(40);
4836                                         break;
4837                                 }
4838                         }
4839
4840                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4841                             TG3_PHY_REV_BCM5401_B0 &&
4842                             !(bmsr & BMSR_LSTATUS) &&
4843                             tp->link_config.active_speed == SPEED_1000) {
4844                                 err = tg3_phy_reset(tp);
4845                                 if (!err)
4846                                         err = tg3_init_5401phy_dsp(tp);
4847                                 if (err)
4848                                         return err;
4849                         }
4850                 }
4851         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4852                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4853                 /* 5701 {A0,B0} CRC bug workaround */
4854                 tg3_writephy(tp, 0x15, 0x0a75);
4855                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4856                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4857                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4858         }
4859
4860         /* Clear pending interrupts... */
4861         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4862         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4863
4864         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4865                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4866         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4867                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4868
4869         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4870             tg3_asic_rev(tp) == ASIC_REV_5701) {
4871                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4872                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4873                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4874                 else
4875                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4876         }
4877
4878         current_link_up = false;
4879         current_speed = SPEED_UNKNOWN;
4880         current_duplex = DUPLEX_UNKNOWN;
4881         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4882         tp->link_config.rmt_adv = 0;
4883
4884         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4885                 err = tg3_phy_auxctl_read(tp,
4886                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4887                                           &val);
4888                 if (!err && !(val & (1 << 10))) {
4889                         tg3_phy_auxctl_write(tp,
4890                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4891                                              val | (1 << 10));
4892                         goto relink;
4893                 }
4894         }
4895
4896         bmsr = 0;
4897         for (i = 0; i < 100; i++) {
4898                 tg3_readphy(tp, MII_BMSR, &bmsr);
4899                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4900                     (bmsr & BMSR_LSTATUS))
4901                         break;
4902                 udelay(40);
4903         }
4904
4905         if (bmsr & BMSR_LSTATUS) {
4906                 u32 aux_stat, bmcr;
4907
4908                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4909                 for (i = 0; i < 2000; i++) {
4910                         udelay(10);
4911                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4912                             aux_stat)
4913                                 break;
4914                 }
4915
4916                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4917                                              &current_speed,
4918                                              &current_duplex);
4919
4920                 bmcr = 0;
4921                 for (i = 0; i < 200; i++) {
4922                         tg3_readphy(tp, MII_BMCR, &bmcr);
4923                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4924                                 continue;
4925                         if (bmcr && bmcr != 0x7fff)
4926                                 break;
4927                         udelay(10);
4928                 }
4929
4930                 lcl_adv = 0;
4931                 rmt_adv = 0;
4932
4933                 tp->link_config.active_speed = current_speed;
4934                 tp->link_config.active_duplex = current_duplex;
4935
4936                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4937                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4938
4939                         if ((bmcr & BMCR_ANENABLE) &&
4940                             eee_config_ok &&
4941                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4942                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4943                                 current_link_up = true;
4944
4945                         /* Changes to EEE settings take effect only after a
4946                          * PHY reset.  If we have skipped a reset because
4947                          * Link Flap Avoidance is enabled, do it now.
4948                          */
4949                         if (!eee_config_ok &&
4950                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4951                             !force_reset) {
4952                                 tg3_setup_eee(tp);
4953                                 tg3_phy_reset(tp);
4954                         }
4955                 } else {
4956                         if (!(bmcr & BMCR_ANENABLE) &&
4957                             tp->link_config.speed == current_speed &&
4958                             tp->link_config.duplex == current_duplex) {
4959                                 current_link_up = true;
4960                         }
4961                 }
4962
4963                 if (current_link_up &&
4964                     tp->link_config.active_duplex == DUPLEX_FULL) {
4965                         u32 reg, bit;
4966
4967                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4968                                 reg = MII_TG3_FET_GEN_STAT;
4969                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4970                         } else {
4971                                 reg = MII_TG3_EXT_STAT;
4972                                 bit = MII_TG3_EXT_STAT_MDIX;
4973                         }
4974
4975                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4976                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4977
4978                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4979                 }
4980         }
4981
4982 relink:
4983         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4984                 tg3_phy_copper_begin(tp);
4985
4986                 if (tg3_flag(tp, ROBOSWITCH)) {
4987                         current_link_up = true;
4988                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4989                         current_speed = SPEED_1000;
4990                         current_duplex = DUPLEX_FULL;
4991                         tp->link_config.active_speed = current_speed;
4992                         tp->link_config.active_duplex = current_duplex;
4993                 }
4994
4995                 tg3_readphy(tp, MII_BMSR, &bmsr);
4996                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4997                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4998                         current_link_up = true;
4999         }
5000
5001         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5002         if (current_link_up) {
5003                 if (tp->link_config.active_speed == SPEED_100 ||
5004                     tp->link_config.active_speed == SPEED_10)
5005                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5006                 else
5007                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5008         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5009                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5010         else
5011                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5012
5013         /* In order for the 5750 core in the BCM4785 chip to work
5014          * properly in RGMII mode, the LED Control Register must be set up.
5015          */
5016         if (tg3_flag(tp, RGMII_MODE)) {
5017                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5018                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5019
5020                 if (tp->link_config.active_speed == SPEED_10)
5021                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5022                 else if (tp->link_config.active_speed == SPEED_100)
5023                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5024                                      LED_CTRL_100MBPS_ON);
5025                 else if (tp->link_config.active_speed == SPEED_1000)
5026                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5027                                      LED_CTRL_1000MBPS_ON);
5028
5029                 tw32(MAC_LED_CTRL, led_ctrl);
5030                 udelay(40);
5031         }
5032
5033         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5034         if (tp->link_config.active_duplex == DUPLEX_HALF)
5035                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5036
5037         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5038                 if (current_link_up &&
5039                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5040                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5041                 else
5042                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5043         }
5044
5045         /* ??? Without this setting the Netgear GA302T PHY does not
5046          * ??? send/receive packets...
5047          */
5048         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5049             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5050                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5051                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5052                 udelay(80);
5053         }
5054
5055         tw32_f(MAC_MODE, tp->mac_mode);
5056         udelay(40);
5057
5058         tg3_phy_eee_adjust(tp, current_link_up);
5059
5060         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5061                 /* Polled via timer. */
5062                 tw32_f(MAC_EVENT, 0);
5063         } else {
5064                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5065         }
5066         udelay(40);
5067
5068         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5069             current_link_up &&
5070             tp->link_config.active_speed == SPEED_1000 &&
5071             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5072                 udelay(120);
5073                 tw32_f(MAC_STATUS,
5074                      (MAC_STATUS_SYNC_CHANGED |
5075                       MAC_STATUS_CFG_CHANGED));
5076                 udelay(40);
5077                 tg3_write_mem(tp,
5078                               NIC_SRAM_FIRMWARE_MBOX,
5079                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5080         }
5081
5082         /* Prevent send BD corruption. */
5083         if (tg3_flag(tp, CLKREQ_BUG)) {
5084                 if (tp->link_config.active_speed == SPEED_100 ||
5085                     tp->link_config.active_speed == SPEED_10)
5086                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5087                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5088                 else
5089                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5090                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5091         }
5092
5093         tg3_test_and_report_link_chg(tp, current_link_up);
5094
5095         return 0;
5096 }
5097
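/* Bookkeeping for the software 1000BASE-X autoneg state machine below.
 * The MR_* flags mirror the IEEE 802.3 clause 37 management variables,
 * and the ANEG_CFG_* bits decode the received config code words.
 */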
5098 struct tg3_fiber_aneginfo {
5099         int state;
5100 #define ANEG_STATE_UNKNOWN              0
5101 #define ANEG_STATE_AN_ENABLE            1
5102 #define ANEG_STATE_RESTART_INIT         2
5103 #define ANEG_STATE_RESTART              3
5104 #define ANEG_STATE_DISABLE_LINK_OK      4
5105 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5106 #define ANEG_STATE_ABILITY_DETECT       6
5107 #define ANEG_STATE_ACK_DETECT_INIT      7
5108 #define ANEG_STATE_ACK_DETECT           8
5109 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5110 #define ANEG_STATE_COMPLETE_ACK         10
5111 #define ANEG_STATE_IDLE_DETECT_INIT     11
5112 #define ANEG_STATE_IDLE_DETECT          12
5113 #define ANEG_STATE_LINK_OK              13
5114 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5115 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5116
5117         u32 flags;
5118 #define MR_AN_ENABLE            0x00000001
5119 #define MR_RESTART_AN           0x00000002
5120 #define MR_AN_COMPLETE          0x00000004
5121 #define MR_PAGE_RX              0x00000008
5122 #define MR_NP_LOADED            0x00000010
5123 #define MR_TOGGLE_TX            0x00000020
5124 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5125 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5126 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5127 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5128 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5129 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5130 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5131 #define MR_TOGGLE_RX            0x00002000
5132 #define MR_NP_RX                0x00004000
5133
5134 #define MR_LINK_OK              0x80000000
5135
5136         unsigned long link_time, cur_time;
5137
5138         u32 ability_match_cfg;
5139         int ability_match_count;
5140
5141         char ability_match, idle_match, ack_match;
5142
5143         u32 txconfig, rxconfig;
5144 #define ANEG_CFG_NP             0x00000080
5145 #define ANEG_CFG_ACK            0x00000040
5146 #define ANEG_CFG_RF2            0x00000020
5147 #define ANEG_CFG_RF1            0x00000010
5148 #define ANEG_CFG_PS2            0x00000001
5149 #define ANEG_CFG_PS1            0x00008000
5150 #define ANEG_CFG_HD             0x00004000
5151 #define ANEG_CFG_FD             0x00002000
5152 #define ANEG_CFG_INVAL          0x00001f06
5153
5154 };
5155 #define ANEG_OK         0
5156 #define ANEG_DONE       1
5157 #define ANEG_TIMER_ENAB 2
5158 #define ANEG_FAILED     -1
5159
5160 #define ANEG_STATE_SETTLE_TIME  10000
5161
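/* Run one tick of the software autoneg state machine.  The caller
 * (fiber_autoneg() below) invokes this in a polling loop until it
 * returns ANEG_DONE or ANEG_FAILED.
 */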
5162 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5163                                    struct tg3_fiber_aneginfo *ap)
5164 {
5165         u16 flowctrl;
5166         unsigned long delta;
5167         u32 rx_cfg_reg;
5168         int ret;
5169
5170         if (ap->state == ANEG_STATE_UNKNOWN) {
5171                 ap->rxconfig = 0;
5172                 ap->link_time = 0;
5173                 ap->cur_time = 0;
5174                 ap->ability_match_cfg = 0;
5175                 ap->ability_match_count = 0;
5176                 ap->ability_match = 0;
5177                 ap->idle_match = 0;
5178                 ap->ack_match = 0;
5179         }
5180         ap->cur_time++;
5181
5182         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5183                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5184
5185                 if (rx_cfg_reg != ap->ability_match_cfg) {
5186                         ap->ability_match_cfg = rx_cfg_reg;
5187                         ap->ability_match = 0;
5188                         ap->ability_match_count = 0;
5189                 } else {
5190                         if (++ap->ability_match_count > 1) {
5191                                 ap->ability_match = 1;
5192                                 ap->ability_match_cfg = rx_cfg_reg;
5193                         }
5194                 }
5195                 if (rx_cfg_reg & ANEG_CFG_ACK)
5196                         ap->ack_match = 1;
5197                 else
5198                         ap->ack_match = 0;
5199
5200                 ap->idle_match = 0;
5201         } else {
5202                 ap->idle_match = 1;
5203                 ap->ability_match_cfg = 0;
5204                 ap->ability_match_count = 0;
5205                 ap->ability_match = 0;
5206                 ap->ack_match = 0;
5207
5208                 rx_cfg_reg = 0;
5209         }
5210
5211         ap->rxconfig = rx_cfg_reg;
5212         ret = ANEG_OK;
5213
5214         switch (ap->state) {
5215         case ANEG_STATE_UNKNOWN:
5216                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5217                         ap->state = ANEG_STATE_AN_ENABLE;
5218
5219                 /* fall through */
5220         case ANEG_STATE_AN_ENABLE:
5221                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5222                 if (ap->flags & MR_AN_ENABLE) {
5223                         ap->link_time = 0;
5224                         ap->cur_time = 0;
5225                         ap->ability_match_cfg = 0;
5226                         ap->ability_match_count = 0;
5227                         ap->ability_match = 0;
5228                         ap->idle_match = 0;
5229                         ap->ack_match = 0;
5230
5231                         ap->state = ANEG_STATE_RESTART_INIT;
5232                 } else {
5233                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5234                 }
5235                 break;
5236
5237         case ANEG_STATE_RESTART_INIT:
5238                 ap->link_time = ap->cur_time;
5239                 ap->flags &= ~(MR_NP_LOADED);
5240                 ap->txconfig = 0;
5241                 tw32(MAC_TX_AUTO_NEG, 0);
5242                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5243                 tw32_f(MAC_MODE, tp->mac_mode);
5244                 udelay(40);
5245
5246                 ret = ANEG_TIMER_ENAB;
5247                 ap->state = ANEG_STATE_RESTART;
5248
5249                 /* fall through */
5250         case ANEG_STATE_RESTART:
5251                 delta = ap->cur_time - ap->link_time;
5252                 if (delta > ANEG_STATE_SETTLE_TIME)
5253                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5254                 else
5255                         ret = ANEG_TIMER_ENAB;
5256                 break;
5257
5258         case ANEG_STATE_DISABLE_LINK_OK:
5259                 ret = ANEG_DONE;
5260                 break;
5261
5262         case ANEG_STATE_ABILITY_DETECT_INIT:
5263                 ap->flags &= ~(MR_TOGGLE_TX);
5264                 ap->txconfig = ANEG_CFG_FD;
5265                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5266                 if (flowctrl & ADVERTISE_1000XPAUSE)
5267                         ap->txconfig |= ANEG_CFG_PS1;
5268                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5269                         ap->txconfig |= ANEG_CFG_PS2;
5270                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5271                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5272                 tw32_f(MAC_MODE, tp->mac_mode);
5273                 udelay(40);
5274
5275                 ap->state = ANEG_STATE_ABILITY_DETECT;
5276                 break;
5277
5278         case ANEG_STATE_ABILITY_DETECT:
5279                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5280                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5281                 break;
5282
5283         case ANEG_STATE_ACK_DETECT_INIT:
5284                 ap->txconfig |= ANEG_CFG_ACK;
5285                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5286                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5287                 tw32_f(MAC_MODE, tp->mac_mode);
5288                 udelay(40);
5289
5290                 ap->state = ANEG_STATE_ACK_DETECT;
5291
5292                 /* fall through */
5293         case ANEG_STATE_ACK_DETECT:
5294                 if (ap->ack_match != 0) {
5295                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5296                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5297                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5298                         } else {
5299                                 ap->state = ANEG_STATE_AN_ENABLE;
5300                         }
5301                 } else if (ap->ability_match != 0 &&
5302                            ap->rxconfig == 0) {
5303                         ap->state = ANEG_STATE_AN_ENABLE;
5304                 }
5305                 break;
5306
5307         case ANEG_STATE_COMPLETE_ACK_INIT:
5308                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5309                         ret = ANEG_FAILED;
5310                         break;
5311                 }
5312                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5313                                MR_LP_ADV_HALF_DUPLEX |
5314                                MR_LP_ADV_SYM_PAUSE |
5315                                MR_LP_ADV_ASYM_PAUSE |
5316                                MR_LP_ADV_REMOTE_FAULT1 |
5317                                MR_LP_ADV_REMOTE_FAULT2 |
5318                                MR_LP_ADV_NEXT_PAGE |
5319                                MR_TOGGLE_RX |
5320                                MR_NP_RX);
5321                 if (ap->rxconfig & ANEG_CFG_FD)
5322                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5323                 if (ap->rxconfig & ANEG_CFG_HD)
5324                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5325                 if (ap->rxconfig & ANEG_CFG_PS1)
5326                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5327                 if (ap->rxconfig & ANEG_CFG_PS2)
5328                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5329                 if (ap->rxconfig & ANEG_CFG_RF1)
5330                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5331                 if (ap->rxconfig & ANEG_CFG_RF2)
5332                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5333                 if (ap->rxconfig & ANEG_CFG_NP)
5334                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5335
5336                 ap->link_time = ap->cur_time;
5337
5338                 ap->flags ^= (MR_TOGGLE_TX);
5339                 if (ap->rxconfig & 0x0008)
5340                         ap->flags |= MR_TOGGLE_RX;
5341                 if (ap->rxconfig & ANEG_CFG_NP)
5342                         ap->flags |= MR_NP_RX;
5343                 ap->flags |= MR_PAGE_RX;
5344
5345                 ap->state = ANEG_STATE_COMPLETE_ACK;
5346                 ret = ANEG_TIMER_ENAB;
5347                 break;
5348
5349         case ANEG_STATE_COMPLETE_ACK:
5350                 if (ap->ability_match != 0 &&
5351                     ap->rxconfig == 0) {
5352                         ap->state = ANEG_STATE_AN_ENABLE;
5353                         break;
5354                 }
5355                 delta = ap->cur_time - ap->link_time;
5356                 if (delta > ANEG_STATE_SETTLE_TIME) {
5357                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5358                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5359                         } else {
5360                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5361                                     !(ap->flags & MR_NP_RX)) {
5362                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5363                                 } else {
5364                                         ret = ANEG_FAILED;
5365                                 }
5366                         }
5367                 }
5368                 break;
5369
5370         case ANEG_STATE_IDLE_DETECT_INIT:
5371                 ap->link_time = ap->cur_time;
5372                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5373                 tw32_f(MAC_MODE, tp->mac_mode);
5374                 udelay(40);
5375
5376                 ap->state = ANEG_STATE_IDLE_DETECT;
5377                 ret = ANEG_TIMER_ENAB;
5378                 break;
5379
5380         case ANEG_STATE_IDLE_DETECT:
5381                 if (ap->ability_match != 0 &&
5382                     ap->rxconfig == 0) {
5383                         ap->state = ANEG_STATE_AN_ENABLE;
5384                         break;
5385                 }
5386                 delta = ap->cur_time - ap->link_time;
5387                 if (delta > ANEG_STATE_SETTLE_TIME) {
5388                         /* XXX another gem from the Broadcom driver :( */
5389                         ap->state = ANEG_STATE_LINK_OK;
5390                 }
5391                 break;
5392
5393         case ANEG_STATE_LINK_OK:
5394                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5395                 ret = ANEG_DONE;
5396                 break;
5397
5398         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5399                 /* ??? unimplemented */
5400                 break;
5401
5402         case ANEG_STATE_NEXT_PAGE_WAIT:
5403                 /* ??? unimplemented */
5404                 break;
5405
5406         default:
5407                 ret = ANEG_FAILED;
5408                 break;
5409         }
5410
5411         return ret;
5412 }
5413
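/* Drive the software autoneg state machine to completion, ticking it
 * roughly once per microsecond for at most ~195 ms.  Returns 1 when the
 * state machine finishes having seen completion, link OK, or a
 * full-duplex link partner.
 */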
5414 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5415 {
5416         int res = 0;
5417         struct tg3_fiber_aneginfo aninfo;
5418         int status = ANEG_FAILED;
5419         unsigned int tick;
5420         u32 tmp;
5421
5422         tw32_f(MAC_TX_AUTO_NEG, 0);
5423
5424         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5425         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5426         udelay(40);
5427
5428         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5429         udelay(40);
5430
5431         memset(&aninfo, 0, sizeof(aninfo));
5432         aninfo.flags |= MR_AN_ENABLE;
5433         aninfo.state = ANEG_STATE_UNKNOWN;
5434         aninfo.cur_time = 0;
5435         tick = 0;
5436         while (++tick < 195000) {
5437                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5438                 if (status == ANEG_DONE || status == ANEG_FAILED)
5439                         break;
5440
5441                 udelay(1);
5442         }
5443
5444         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5445         tw32_f(MAC_MODE, tp->mac_mode);
5446         udelay(40);
5447
5448         *txflags = aninfo.txconfig;
5449         *rxflags = aninfo.flags;
5450
5451         if (status == ANEG_DONE &&
5452             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5453                              MR_LP_ADV_FULL_DUPLEX)))
5454                 res = 1;
5455
5456         return res;
5457 }
5458
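/* Bring-up sequence for the BCM8002 SerDes PHY.  Most of the register
 * writes below are undocumented vendor magic.
 */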
5459 static void tg3_init_bcm8002(struct tg3 *tp)
5460 {
5461         u32 mac_status = tr32(MAC_STATUS);
5462         int i;
5463
5464         /* Reset when initializing for the first time or when we have a link. */
5465         if (tg3_flag(tp, INIT_COMPLETE) &&
5466             !(mac_status & MAC_STATUS_PCS_SYNCED))
5467                 return;
5468
5469         /* Set PLL lock range. */
5470         tg3_writephy(tp, 0x16, 0x8007);
5471
5472         /* SW reset */
5473         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5474
5475         /* Wait for reset to complete. */
5476         /* XXX schedule_timeout() ... */
5477         for (i = 0; i < 500; i++)
5478                 udelay(10);
5479
5480         /* Config mode; select PMA/Ch 1 regs. */
5481         tg3_writephy(tp, 0x10, 0x8411);
5482
5483         /* Enable auto-lock and comdet, select txclk for tx. */
5484         tg3_writephy(tp, 0x11, 0x0a10);
5485
5486         tg3_writephy(tp, 0x18, 0x00a0);
5487         tg3_writephy(tp, 0x16, 0x41ff);
5488
5489         /* Assert and deassert POR. */
5490         tg3_writephy(tp, 0x13, 0x0400);
5491         udelay(40);
5492         tg3_writephy(tp, 0x13, 0x0000);
5493
5494         tg3_writephy(tp, 0x11, 0x0a50);
5495         udelay(40);
5496         tg3_writephy(tp, 0x11, 0x0a10);
5497
5498         /* Wait for signal to stabilize */
5499         /* XXX schedule_timeout() ... */
5500         for (i = 0; i < 15000; i++)
5501                 udelay(10);
5502
5503         /* Deselect the channel register so we can read the PHYID
5504          * later.
5505          */
5506         tg3_writephy(tp, 0x10, 0x8011);
5507 }
5508
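/* Fiber link setup using the hardware autoneg engine (the SG_DIG
 * registers).  Applies the MAC_SERDES_CFG workaround on everything but
 * 5704 A0/A1, and falls back to parallel detection when the partner
 * never completes autoneg.
 */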
5509 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5510 {
5511         u16 flowctrl;
5512         bool current_link_up;
5513         u32 sg_dig_ctrl, sg_dig_status;
5514         u32 serdes_cfg, expected_sg_dig_ctrl;
5515         int workaround, port_a;
5516
5517         serdes_cfg = 0;
5518         expected_sg_dig_ctrl = 0;
5519         workaround = 0;
5520         port_a = 1;
5521         current_link_up = false;
5522
5523         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5524             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5525                 workaround = 1;
5526                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5527                         port_a = 0;
5528
5529                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5530                 /* preserve bits 20-23 for voltage regulator */
5531                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5532         }
5533
5534         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5535
5536         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5537                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5538                         if (workaround) {
5539                                 u32 val = serdes_cfg;
5540
5541                                 if (port_a)
5542                                         val |= 0xc010000;
5543                                 else
5544                                         val |= 0x4010000;
5545                                 tw32_f(MAC_SERDES_CFG, val);
5546                         }
5547
5548                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5549                 }
5550                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5551                         tg3_setup_flow_control(tp, 0, 0);
5552                         current_link_up = true;
5553                 }
5554                 goto out;
5555         }
5556
5557         /* Want auto-negotiation. */
5558         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5559
5560         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5561         if (flowctrl & ADVERTISE_1000XPAUSE)
5562                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5563         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5564                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5565
5566         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5567                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5568                     tp->serdes_counter &&
5569                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5570                                     MAC_STATUS_RCVD_CFG)) ==
5571                      MAC_STATUS_PCS_SYNCED)) {
5572                         tp->serdes_counter--;
5573                         current_link_up = true;
5574                         goto out;
5575                 }
5576 restart_autoneg:
5577                 if (workaround)
5578                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5579                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5580                 udelay(5);
5581                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5582
5583                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5584                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5585         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5586                                  MAC_STATUS_SIGNAL_DET)) {
5587                 sg_dig_status = tr32(SG_DIG_STATUS);
5588                 mac_status = tr32(MAC_STATUS);
5589
5590                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5591                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5592                         u32 local_adv = 0, remote_adv = 0;
5593
5594                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5595                                 local_adv |= ADVERTISE_1000XPAUSE;
5596                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5597                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5598
5599                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5600                                 remote_adv |= LPA_1000XPAUSE;
5601                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5602                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5603
5604                         tp->link_config.rmt_adv =
5605                                            mii_adv_to_ethtool_adv_x(remote_adv);
5606
5607                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5608                         current_link_up = true;
5609                         tp->serdes_counter = 0;
5610                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5611                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5612                         if (tp->serdes_counter)
5613                                 tp->serdes_counter--;
5614                         else {
5615                                 if (workaround) {
5616                                         u32 val = serdes_cfg;
5617
5618                                         if (port_a)
5619                                                 val |= 0xc010000;
5620                                         else
5621                                                 val |= 0x4010000;
5622
5623                                         tw32_f(MAC_SERDES_CFG, val);
5624                                 }
5625
5626                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5627                                 udelay(40);
5628
5629                                 /* Link parallel detection: link is up only
5630                                  * if we have PCS_SYNC and are not
5631                                  * receiving config code words. */
5632                                 mac_status = tr32(MAC_STATUS);
5633                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5634                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5635                                         tg3_setup_flow_control(tp, 0, 0);
5636                                         current_link_up = true;
5637                                         tp->phy_flags |=
5638                                                 TG3_PHYFLG_PARALLEL_DETECT;
5639                                         tp->serdes_counter =
5640                                                 SERDES_PARALLEL_DET_TIMEOUT;
5641                                 } else
5642                                         goto restart_autoneg;
5643                         }
5644                 }
5645         } else {
5646                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5647                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5648         }
5649
5650 out:
5651         return current_link_up;
5652 }
5653
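/* Fiber link setup without the hardware autoneg engine: run the software
 * state machine when autoneg is enabled, otherwise force a 1000FD link.
 */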
5654 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5655 {
5656         bool current_link_up = false;
5657
5658         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5659                 goto out;
5660
5661         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5662                 u32 txflags, rxflags;
5663                 int i;
5664
5665                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5666                         u32 local_adv = 0, remote_adv = 0;
5667
5668                         if (txflags & ANEG_CFG_PS1)
5669                                 local_adv |= ADVERTISE_1000XPAUSE;
5670                         if (txflags & ANEG_CFG_PS2)
5671                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5672
5673                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5674                                 remote_adv |= LPA_1000XPAUSE;
5675                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5676                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5677
5678                         tp->link_config.rmt_adv =
5679                                            mii_adv_to_ethtool_adv_x(remote_adv);
5680
5681                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5682
5683                         current_link_up = true;
5684                 }
5685                 for (i = 0; i < 30; i++) {
5686                         udelay(20);
5687                         tw32_f(MAC_STATUS,
5688                                (MAC_STATUS_SYNC_CHANGED |
5689                                 MAC_STATUS_CFG_CHANGED));
5690                         udelay(40);
5691                         if ((tr32(MAC_STATUS) &
5692                              (MAC_STATUS_SYNC_CHANGED |
5693                               MAC_STATUS_CFG_CHANGED)) == 0)
5694                                 break;
5695                 }
5696
5697                 mac_status = tr32(MAC_STATUS);
5698                 if (!current_link_up &&
5699                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5700                     !(mac_status & MAC_STATUS_RCVD_CFG))
5701                         current_link_up = true;
5702         } else {
5703                 tg3_setup_flow_control(tp, 0, 0);
5704
5705                 /* Forcing 1000FD link up. */
5706                 current_link_up = true;
5707
5708                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5709                 udelay(40);
5710
5711                 tw32_f(MAC_MODE, tp->mac_mode);
5712                 udelay(40);
5713         }
5714
5715 out:
5716         return current_link_up;
5717 }
5718
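/* Top-level link setup for TBI (fiber) ports.  Returns early when a
 * previously established software-autoneg link is still healthy, then
 * defers to the hardware or by-hand autoneg helpers above.
 */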
5719 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5720 {
5721         u32 orig_pause_cfg;
5722         u32 orig_active_speed;
5723         u8 orig_active_duplex;
5724         u32 mac_status;
5725         bool current_link_up;
5726         int i;
5727
5728         orig_pause_cfg = tp->link_config.active_flowctrl;
5729         orig_active_speed = tp->link_config.active_speed;
5730         orig_active_duplex = tp->link_config.active_duplex;
5731
5732         if (!tg3_flag(tp, HW_AUTONEG) &&
5733             tp->link_up &&
5734             tg3_flag(tp, INIT_COMPLETE)) {
5735                 mac_status = tr32(MAC_STATUS);
5736                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5737                                MAC_STATUS_SIGNAL_DET |
5738                                MAC_STATUS_CFG_CHANGED |
5739                                MAC_STATUS_RCVD_CFG);
5740                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5741                                    MAC_STATUS_SIGNAL_DET)) {
5742                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5743                                             MAC_STATUS_CFG_CHANGED));
5744                         return 0;
5745                 }
5746         }
5747
5748         tw32_f(MAC_TX_AUTO_NEG, 0);
5749
5750         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5751         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5752         tw32_f(MAC_MODE, tp->mac_mode);
5753         udelay(40);
5754
5755         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5756                 tg3_init_bcm8002(tp);
5757
5758         /* Enable link change events even when polling the serdes.  */
5759         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5760         udelay(40);
5761
5762         current_link_up = false;
5763         tp->link_config.rmt_adv = 0;
5764         mac_status = tr32(MAC_STATUS);
5765
5766         if (tg3_flag(tp, HW_AUTONEG))
5767                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5768         else
5769                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5770
5771         tp->napi[0].hw_status->status =
5772                 (SD_STATUS_UPDATED |
5773                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5774
5775         for (i = 0; i < 100; i++) {
5776                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5777                                     MAC_STATUS_CFG_CHANGED));
5778                 udelay(5);
5779                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5780                                          MAC_STATUS_CFG_CHANGED |
5781                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5782                         break;
5783         }
5784
5785         mac_status = tr32(MAC_STATUS);
5786         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5787                 current_link_up = false;
5788                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5789                     tp->serdes_counter == 0) {
5790                         tw32_f(MAC_MODE, (tp->mac_mode |
5791                                           MAC_MODE_SEND_CONFIGS));
5792                         udelay(1);
5793                         tw32_f(MAC_MODE, tp->mac_mode);
5794                 }
5795         }
5796
5797         if (current_link_up) {
5798                 tp->link_config.active_speed = SPEED_1000;
5799                 tp->link_config.active_duplex = DUPLEX_FULL;
5800                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5801                                     LED_CTRL_LNKLED_OVERRIDE |
5802                                     LED_CTRL_1000MBPS_ON));
5803         } else {
5804                 tp->link_config.active_speed = SPEED_UNKNOWN;
5805                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5806                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5807                                     LED_CTRL_LNKLED_OVERRIDE |
5808                                     LED_CTRL_TRAFFIC_OVERRIDE));
5809         }
5810
5811         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5812                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5813                 if (orig_pause_cfg != now_pause_cfg ||
5814                     orig_active_speed != tp->link_config.active_speed ||
5815                     orig_active_duplex != tp->link_config.active_duplex)
5816                         tg3_link_report(tp);
5817         }
5818
5819         return 0;
5820 }
5821
5822 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5823 {
5824         int err = 0;
5825         u32 bmsr, bmcr;
5826         u32 current_speed = SPEED_UNKNOWN;
5827         u8 current_duplex = DUPLEX_UNKNOWN;
5828         bool current_link_up = false;
5829         u32 local_adv = 0, remote_adv = 0, sgsr;
5830
5831         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5832              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5833              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5834              (sgsr & SERDES_TG3_SGMII_MODE)) {
5835
5836                 if (force_reset)
5837                         tg3_phy_reset(tp);
5838
5839                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5840
5841                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5842                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5843                 } else {
5844                         current_link_up = true;
5845                         if (sgsr & SERDES_TG3_SPEED_1000) {
5846                                 current_speed = SPEED_1000;
5847                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5848                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5849                                 current_speed = SPEED_100;
5850                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5851                         } else {
5852                                 current_speed = SPEED_10;
5853                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5854                         }
5855
5856                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5857                                 current_duplex = DUPLEX_FULL;
5858                         else
5859                                 current_duplex = DUPLEX_HALF;
5860                 }
5861
5862                 tw32_f(MAC_MODE, tp->mac_mode);
5863                 udelay(40);
5864
5865                 tg3_clear_mac_status(tp);
5866
5867                 goto fiber_setup_done;
5868         }
5869
5870         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5871         tw32_f(MAC_MODE, tp->mac_mode);
5872         udelay(40);
5873
5874         tg3_clear_mac_status(tp);
5875
5876         if (force_reset)
5877                 tg3_phy_reset(tp);
5878
5879         tp->link_config.rmt_adv = 0;
5880
5881         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5882         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5883         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5884                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5885                         bmsr |= BMSR_LSTATUS;
5886                 else
5887                         bmsr &= ~BMSR_LSTATUS;
5888         }
5889
5890         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5891
5892         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5893             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5894                 /* do nothing, just check for link up at the end */
5895         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5896                 u32 adv, newadv;
5897
5898                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5899                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5900                                  ADVERTISE_1000XPAUSE |
5901                                  ADVERTISE_1000XPSE_ASYM |
5902                                  ADVERTISE_SLCT);
5903
5904                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5905                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5906
5907                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5908                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5909                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5910                         tg3_writephy(tp, MII_BMCR, bmcr);
5911
5912                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5913                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5914                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5915
5916                         return err;
5917                 }
5918         } else {
5919                 u32 new_bmcr;
5920
5921                 bmcr &= ~BMCR_SPEED1000;
5922                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5923
5924                 if (tp->link_config.duplex == DUPLEX_FULL)
5925                         new_bmcr |= BMCR_FULLDPLX;
5926
5927                 if (new_bmcr != bmcr) {
5928                         /* BMCR_SPEED1000 is a reserved bit that needs
5929                          * to be set on write.
5930                          */
5931                         new_bmcr |= BMCR_SPEED1000;
5932
5933                         /* Force a linkdown */
5934                         if (tp->link_up) {
5935                                 u32 adv;
5936
5937                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5938                                 adv &= ~(ADVERTISE_1000XFULL |
5939                                          ADVERTISE_1000XHALF |
5940                                          ADVERTISE_SLCT);
5941                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5942                                 tg3_writephy(tp, MII_BMCR, bmcr |
5943                                                            BMCR_ANRESTART |
5944                                                            BMCR_ANENABLE);
5945                                 udelay(10);
5946                                 tg3_carrier_off(tp);
5947                         }
5948                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5949                         bmcr = new_bmcr;
5950                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5951                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5952                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5953                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5954                                         bmsr |= BMSR_LSTATUS;
5955                                 else
5956                                         bmsr &= ~BMSR_LSTATUS;
5957                         }
5958                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5959                 }
5960         }
5961
5962         if (bmsr & BMSR_LSTATUS) {
5963                 current_speed = SPEED_1000;
5964                 current_link_up = true;
5965                 if (bmcr & BMCR_FULLDPLX)
5966                         current_duplex = DUPLEX_FULL;
5967                 else
5968                         current_duplex = DUPLEX_HALF;
5969
5970                 local_adv = 0;
5971                 remote_adv = 0;
5972
5973                 if (bmcr & BMCR_ANENABLE) {
5974                         u32 common;
5975
5976                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5977                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5978                         common = local_adv & remote_adv;
5979                         if (common & (ADVERTISE_1000XHALF |
5980                                       ADVERTISE_1000XFULL)) {
5981                                 if (common & ADVERTISE_1000XFULL)
5982                                         current_duplex = DUPLEX_FULL;
5983                                 else
5984                                         current_duplex = DUPLEX_HALF;
5985
5986                                 tp->link_config.rmt_adv =
5987                                            mii_adv_to_ethtool_adv_x(remote_adv);
5988                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5989                                 /* Link is up via parallel detect */
5990                         } else {
5991                                 current_link_up = false;
5992                         }
5993                 }
5994         }
5995
5996 fiber_setup_done:
5997         if (current_link_up && current_duplex == DUPLEX_FULL)
5998                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5999
6000         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6001         if (tp->link_config.active_duplex == DUPLEX_HALF)
6002                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
6003
6004         tw32_f(MAC_MODE, tp->mac_mode);
6005         udelay(40);
6006
6007         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6008
6009         tp->link_config.active_speed = current_speed;
6010         tp->link_config.active_duplex = current_duplex;
6011
6012         tg3_test_and_report_link_chg(tp, current_link_up);
6013         return err;
6014 }
6015
6016 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6017 {
6018         if (tp->serdes_counter) {
6019                 /* Give autoneg time to complete. */
6020                 tp->serdes_counter--;
6021                 return;
6022         }
6023
6024         if (!tp->link_up &&
6025             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6026                 u32 bmcr;
6027
6028                 tg3_readphy(tp, MII_BMCR, &bmcr);
6029                 if (bmcr & BMCR_ANENABLE) {
6030                         u32 phy1, phy2;
6031
6032                         /* Select shadow register 0x1f */
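                                  /* 0x7c00 = 0x1f << 10: bits 14:10 of this
                                   * register carry the shadow select.
                                   */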
6033                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6034                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6035
6036                         /* Select expansion interrupt status register */
6037                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6038                                          MII_TG3_DSP_EXP1_INT_STAT);
6039                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6040                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6041
6042                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6043                                 /* Signal detect is present and we are not
6044                                  * receiving config code words, so the link
6045                                  * is up via parallel detection.
6046                                  */
6047
6048                                 bmcr &= ~BMCR_ANENABLE;
6049                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6050                                 tg3_writephy(tp, MII_BMCR, bmcr);
6051                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6052                         }
6053                 }
6054         } else if (tp->link_up &&
6055                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6056                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6057                 u32 phy2;
6058
6059                 /* Select expansion interrupt status register */
6060                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6061                                  MII_TG3_DSP_EXP1_INT_STAT);
6062                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6063                 if (phy2 & 0x20) {
6064                         u32 bmcr;
6065
6066                         /* Config code words received, turn on autoneg. */
6067                         tg3_readphy(tp, MII_BMCR, &bmcr);
6068                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6069
6070                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6072                 }
6073         }
6074 }
6075
6076 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6077 {
6078         u32 val;
6079         int err;
6080
6081         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6082                 err = tg3_setup_fiber_phy(tp, force_reset);
6083         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6084                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6085         else
6086                 err = tg3_setup_copper_phy(tp, force_reset);
6087
6088         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6089                 u32 scale;
6090
6091                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6092                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6093                         scale = 65;
6094                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6095                         scale = 6;
6096                 else
6097                         scale = 12;
6098
6099                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6100                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6101                 tw32(GRC_MISC_CFG, val);
6102         }
6103
6104         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6105               (6 << TX_LENGTHS_IPG_SHIFT);
6106         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6107             tg3_asic_rev(tp) == ASIC_REV_5762)
6108                 val |= tr32(MAC_TX_LENGTHS) &
6109                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6110                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6111
6112         if (tp->link_config.active_speed == SPEED_1000 &&
6113             tp->link_config.active_duplex == DUPLEX_HALF)
6114                 tw32(MAC_TX_LENGTHS, val |
6115                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6116         else
6117                 tw32(MAC_TX_LENGTHS, val |
6118                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6119
6120         if (!tg3_flag(tp, 5705_PLUS)) {
6121                 if (tp->link_up) {
6122                         tw32(HOSTCC_STAT_COAL_TICKS,
6123                              tp->coal.stats_block_coalesce_usecs);
6124                 } else {
6125                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6126                 }
6127         }
6128
6129         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6130                 val = tr32(PCIE_PWR_MGMT_THRESH);
6131                 if (!tp->link_up)
6132                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6133                               tp->pwrmgmt_thresh;
6134                 else
6135                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6136                 tw32(PCIE_PWR_MGMT_THRESH, val);
6137         }
6138
6139         return err;
6140 }
6141
6142 /* tp->lock must be held */
6143 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6144 {
6145         u64 stamp;
6146
6147         ptp_read_system_prets(sts);
6148         stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6149         ptp_read_system_postts(sts);
6150         stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6151
6152         return stamp;
6153 }
6154
6155 /* tp->lock must be held */
6156 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6157 {
6158         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6159
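              /* Stop the counter while the 64-bit value is loaded as two
              * 32-bit halves, then resume, so a reader never observes a
              * torn count.
              */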
6160         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6161         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6162         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6163         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6164 }
6165
6166 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6167 static inline void tg3_full_unlock(struct tg3 *tp);
6168 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6169 {
6170         struct tg3 *tp = netdev_priv(dev);
6171
6172         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6173                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6174                                 SOF_TIMESTAMPING_SOFTWARE;
6175
6176         if (tg3_flag(tp, PTP_CAPABLE)) {
6177                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6178                                         SOF_TIMESTAMPING_RX_HARDWARE |
6179                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6180         }
6181
6182         if (tp->ptp_clock)
6183                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6184         else
6185                 info->phc_index = -1;
6186
6187         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6188
6189         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6190                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6191                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6192                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6193         return 0;
6194 }
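
     /* A minimal user-space sketch of enabling the capabilities reported
      * above, assuming an AF_INET datagram socket fd and interface name
      * "eth0" (both illustrative); error handling and the required
      * <linux/net_tstamp.h>, <linux/sockios.h>, <net/if.h> and
      * <sys/ioctl.h> includes are omitted:
      *
      *     struct hwtstamp_config cfg = { 0 };
      *     struct ifreq ifr = { 0 };
      *
      *     cfg.tx_type = HWTSTAMP_TX_ON;
      *     cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
      *     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
      *     ifr.ifr_data = (void *)&cfg;
      *     ioctl(fd, SIOCSHWTSTAMP, &ifr);
      */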
6195
6196 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6197 {
6198         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6199         bool neg_adj = false;
6200         u32 correction = 0;
6201
6202         if (ppb < 0) {
6203                 neg_adj = true;
6204                 ppb = -ppb;
6205         }
6206
6207         /* Frequency adjustment is performed using hardware with a 24 bit
6208          * accumulator and a programmable correction value. On each clk, the
6209          * correction value gets added to the accumulator and when it
6210          * overflows, the time counter is incremented/decremented.
6211          *
6212          * So conversion from ppb to correction value is
6213          *              ppb * (1 << 24) / 1000000000
6214          */
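              /* For example, a +100 ppm request arrives here as ppb = 100000
              * and yields correction = 100000 * (1 << 24) / 10^9 = 1677
              * (truncated), i.e. the accumulator gains an extra 1677 / 2^24
              * of a tick on every clock.  The values are purely illustrative.
              */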
6215         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6216                      TG3_EAV_REF_CLK_CORRECT_MASK;
6217
6218         tg3_full_lock(tp, 0);
6219
6220         if (correction)
6221                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6222                      TG3_EAV_REF_CLK_CORRECT_EN |
6223                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6224         else
6225                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6226
6227         tg3_full_unlock(tp);
6228
6229         return 0;
6230 }
6231
6232 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6233 {
6234         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6235
6236         tg3_full_lock(tp, 0);
6237         tp->ptp_adjust += delta;
6238         tg3_full_unlock(tp);
6239
6240         return 0;
6241 }
6242
6243 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6244                             struct ptp_system_timestamp *sts)
6245 {
6246         u64 ns;
6247         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6248
6249         tg3_full_lock(tp, 0);
6250         ns = tg3_refclk_read(tp, sts);
6251         ns += tp->ptp_adjust;
6252         tg3_full_unlock(tp);
6253
6254         *ts = ns_to_timespec64(ns);
6255
6256         return 0;
6257 }
6258
6259 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6260                            const struct timespec64 *ts)
6261 {
6262         u64 ns;
6263         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6264
6265         ns = timespec64_to_ns(ts);
6266
6267         tg3_full_lock(tp, 0);
6268         tg3_refclk_write(tp, ns);
6269         tp->ptp_adjust = 0;
6270         tg3_full_unlock(tp);
6271
6272         return 0;
6273 }
6274
6275 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6276                           struct ptp_clock_request *rq, int on)
6277 {
6278         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6279         u32 clock_ctl;
6280         int rval = 0;
6281
6282         switch (rq->type) {
6283         case PTP_CLK_REQ_PEROUT:
6284                 if (rq->perout.index != 0)
6285                         return -EINVAL;
6286
6287                 tg3_full_lock(tp, 0);
6288                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6289                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6290
6291                 if (on) {
6292                         u64 nsec;
6293
6294                         nsec = rq->perout.start.sec * 1000000000ULL +
6295                                rq->perout.start.nsec;
6296
6297                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6298                                 netdev_warn(tp->dev,
6299                                             "Device supports only a one-shot timesync output, period must be 0\n");
6300                                 rval = -EINVAL;
6301                                 goto err_out;
6302                         }
6303
6304                         if (nsec & (1ULL << 63)) {
6305                                 netdev_warn(tp->dev,
6306                                             "Start value (nsec) exceeds the limit; start must fit in 63 bits\n");
6307                                 rval = -EINVAL;
6308                                 goto err_out;
6309                         }
6310
6311                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6312                         tw32(TG3_EAV_WATCHDOG0_MSB,
6313                              TG3_EAV_WATCHDOG0_EN |
6314                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6315
6316                         tw32(TG3_EAV_REF_CLCK_CTL,
6317                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6318                 } else {
6319                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6320                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6321                 }
6322
6323 err_out:
6324                 tg3_full_unlock(tp);
6325                 return rval;
6326
6327         default:
6328                 break;
6329         }
6330
6331         return -EOPNOTSUPP;
6332 }
6333
6334 static const struct ptp_clock_info tg3_ptp_caps = {
6335         .owner          = THIS_MODULE,
6336         .name           = "tg3 clock",
6337         .max_adj        = 250000000,
6338         .n_alarm        = 0,
6339         .n_ext_ts       = 0,
6340         .n_per_out      = 1,
6341         .n_pins         = 0,
6342         .pps            = 0,
6343         .adjfreq        = tg3_ptp_adjfreq,
6344         .adjtime        = tg3_ptp_adjtime,
6345         .gettimex64     = tg3_ptp_gettimex,
6346         .settime64      = tg3_ptp_settime,
6347         .enable         = tg3_ptp_enable,
6348 };
6349
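     /* The delta accumulated by tg3_ptp_adjtime() lives only in software
      * (tp->ptp_adjust), so it is folded into every hardware timestamp
      * here, just as tg3_ptp_gettimex() folds it into clock reads.
      */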
6350 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6351                                      struct skb_shared_hwtstamps *timestamp)
6352 {
6353         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6354         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6355                                            tp->ptp_adjust);
6356 }
6357
6358 /* tp->lock must be held */
6359 static void tg3_ptp_init(struct tg3 *tp)
6360 {
6361         if (!tg3_flag(tp, PTP_CAPABLE))
6362                 return;
6363
6364         /* Initialize the hardware clock to the system time. */
6365         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6366         tp->ptp_adjust = 0;
6367         tp->ptp_info = tg3_ptp_caps;
6368 }
6369
6370 /* tp->lock must be held */
6371 static void tg3_ptp_resume(struct tg3 *tp)
6372 {
6373         if (!tg3_flag(tp, PTP_CAPABLE))
6374                 return;
6375
6376         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6377         tp->ptp_adjust = 0;
6378 }
6379
6380 static void tg3_ptp_fini(struct tg3 *tp)
6381 {
6382         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6383                 return;
6384
6385         ptp_clock_unregister(tp->ptp_clock);
6386         tp->ptp_clock = NULL;
6387         tp->ptp_adjust = 0;
6388 }
6389
6390 static inline int tg3_irq_sync(struct tg3 *tp)
6391 {
6392         return tp->irq_sync;
6393 }
6394
6395 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6396 {
6397         int i;
6398
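              /* Advance dst by 'off' so each register lands at the same
              * offset in the dump buffer as it occupies in register space.
              */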
6399         dst = (u32 *)((u8 *)dst + off);
6400         for (i = 0; i < len; i += sizeof(u32))
6401                 *dst++ = tr32(off + i);
6402 }
6403
6404 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6405 {
6406         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6407         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6408         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6409         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6410         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6411         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6412         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6413         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6414         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6415         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6416         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6417         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6418         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6419         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6420         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6421         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6422         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6423         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6424         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6425
6426         if (tg3_flag(tp, SUPPORT_MSIX))
6427                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6428
6429         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6430         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6431         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6432         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6433         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6434         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6435         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6436         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6437
6438         if (!tg3_flag(tp, 5705_PLUS)) {
6439                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6440                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6441                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6442         }
6443
6444         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6445         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6446         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6447         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6448         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6449
6450         if (tg3_flag(tp, NVRAM))
6451                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6452 }
6453
6454 static void tg3_dump_state(struct tg3 *tp)
6455 {
6456         int i;
6457         u32 *regs;
6458
6459         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6460         if (!regs)
6461                 return;
6462
6463         if (tg3_flag(tp, PCI_EXPRESS)) {
6464                 /* Read up to but not including private PCI registers */
6465                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6466                         regs[i / sizeof(u32)] = tr32(i);
6467         } else
6468                 tg3_dump_legacy_regs(tp, regs);
6469
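              /* Dump four registers per line, skipping groups that are all zero. */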
6470         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6471                 if (!regs[i + 0] && !regs[i + 1] &&
6472                     !regs[i + 2] && !regs[i + 3])
6473                         continue;
6474
6475                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6476                            i * 4,
6477                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6478         }
6479
6480         kfree(regs);
6481
6482         for (i = 0; i < tp->irq_cnt; i++) {
6483                 struct tg3_napi *tnapi = &tp->napi[i];
6484
6485                 /* SW status block */
6486                 netdev_err(tp->dev,
6487                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6488                            i,
6489                            tnapi->hw_status->status,
6490                            tnapi->hw_status->status_tag,
6491                            tnapi->hw_status->rx_jumbo_consumer,
6492                            tnapi->hw_status->rx_consumer,
6493                            tnapi->hw_status->rx_mini_consumer,
6494                            tnapi->hw_status->idx[0].rx_producer,
6495                            tnapi->hw_status->idx[0].tx_consumer);
6496
6497                 netdev_err(tp->dev,
6498                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6499                            i,
6500                            tnapi->last_tag, tnapi->last_irq_tag,
6501                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6502                            tnapi->rx_rcb_ptr,
6503                            tnapi->prodring.rx_std_prod_idx,
6504                            tnapi->prodring.rx_std_cons_idx,
6505                            tnapi->prodring.rx_jmb_prod_idx,
6506                            tnapi->prodring.rx_jmb_cons_idx);
6507         }
6508 }
6509
6510 /* This is called whenever we suspect that the system chipset is re-
6511  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6512  * is bogus tx completions. We try to recover by setting the
6513  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6514  * in the workqueue.
6515  */
6516 static void tg3_tx_recover(struct tg3 *tp)
6517 {
6518         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6519                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6520
6521         netdev_warn(tp->dev,
6522                     "The system may be re-ordering memory-mapped I/O "
6523                     "cycles to the network device, attempting to recover. "
6524                     "Please report the problem to the driver maintainer "
6525                     "and include system chipset information.\n");
6526
6527         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6528 }
6529
6530 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6531 {
6532         /* Tell compiler to fetch tx indices from memory. */
6533         barrier();
6534         return tnapi->tx_pending -
6535                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6536 }
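
     /* A worked example of the arithmetic above, assuming the 512-entry TX
      * ring (mask 511) and illustrative values tx_pending = 500, tx_prod = 10,
      * tx_cons = 500 after the producer has wrapped: (10 - 500) & 511 = 22
      * descriptors are in flight, leaving 500 - 22 = 478 available.
      */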
6537
6538 /* Tigon3 never reports partial packet sends.  So we do not
6539  * need special logic to handle SKBs that have not had all
6540  * of their frags sent yet, like SunGEM does.
6541  */
6542 static void tg3_tx(struct tg3_napi *tnapi)
6543 {
6544         struct tg3 *tp = tnapi->tp;
6545         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6546         u32 sw_idx = tnapi->tx_cons;
6547         struct netdev_queue *txq;
6548         int index = tnapi - tp->napi;
6549         unsigned int pkts_compl = 0, bytes_compl = 0;
6550
6551         if (tg3_flag(tp, ENABLE_TSS))
6552                 index--;
6553
6554         txq = netdev_get_tx_queue(tp->dev, index);
6555
6556         while (sw_idx != hw_idx) {
6557                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6558                 struct sk_buff *skb = ri->skb;
6559                 int i, tx_bug = 0;
6560
6561                 if (unlikely(skb == NULL)) {
6562                         tg3_tx_recover(tp);
6563                         return;
6564                 }
6565
6566                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6567                         struct skb_shared_hwtstamps timestamp;
6568                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6569                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6570
6571                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6572
6573                         skb_tstamp_tx(skb, &timestamp);
6574                 }
6575
6576                 pci_unmap_single(tp->pdev,
6577                                  dma_unmap_addr(ri, mapping),
6578                                  skb_headlen(skb),
6579                                  PCI_DMA_TODEVICE);
6580
6581                 ri->skb = NULL;
6582
6583                 while (ri->fragmented) {
6584                         ri->fragmented = false;
6585                         sw_idx = NEXT_TX(sw_idx);
6586                         ri = &tnapi->tx_buffers[sw_idx];
6587                 }
6588
6589                 sw_idx = NEXT_TX(sw_idx);
6590
6591                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6592                         ri = &tnapi->tx_buffers[sw_idx];
6593                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6594                                 tx_bug = 1;
6595
6596                         pci_unmap_page(tp->pdev,
6597                                        dma_unmap_addr(ri, mapping),
6598                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6599                                        PCI_DMA_TODEVICE);
6600
6601                         while (ri->fragmented) {
6602                                 ri->fragmented = false;
6603                                 sw_idx = NEXT_TX(sw_idx);
6604                                 ri = &tnapi->tx_buffers[sw_idx];
6605                         }
6606
6607                         sw_idx = NEXT_TX(sw_idx);
6608                 }
6609
6610                 pkts_compl++;
6611                 bytes_compl += skb->len;
6612
6613                 dev_consume_skb_any(skb);
6614
6615                 if (unlikely(tx_bug)) {
6616                         tg3_tx_recover(tp);
6617                         return;
6618                 }
6619         }
6620
6621         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6622
6623         tnapi->tx_cons = sw_idx;
6624
6625         /* Need to make the tx_cons update visible to tg3_start_xmit()
6626          * before checking for netif_queue_stopped().  Without the
6627          * memory barrier, there is a small possibility that tg3_start_xmit()
6628          * will miss it and cause the queue to be stopped forever.
6629          */
6630         smp_mb();
6631
6632         if (unlikely(netif_tx_queue_stopped(txq) &&
6633                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6634                 __netif_tx_lock(txq, smp_processor_id());
6635                 if (netif_tx_queue_stopped(txq) &&
6636                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6637                         netif_tx_wake_queue(txq);
6638                 __netif_tx_unlock(txq);
6639         }
6640 }
6641
6642 static void tg3_frag_free(bool is_frag, void *data)
6643 {
6644         if (is_frag)
6645                 skb_free_frag(data);
6646         else
6647                 kfree(data);
6648 }
6649
6650 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6651 {
6652         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6653                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6654
6655         if (!ri->data)
6656                 return;
6657
6658         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6659                          map_sz, PCI_DMA_FROMDEVICE);
6660         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6661         ri->data = NULL;
6662 }
6663
6665 /* Returns size of the rx data buffer allocated, or < 0 on error.
6666  *
6667  * We only need to fill in the address because the other members
6668  * of the RX descriptor are invariant, see tg3_init_rings.
6669  *
6670  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6671  * posting buffers we only dirty the first cache line of the RX
6672  * descriptor (containing the address).  Whereas for the RX status
6673  * buffers the cpu only reads the last cacheline of the RX descriptor
6674  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6675  */
6676 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6677                              u32 opaque_key, u32 dest_idx_unmasked,
6678                              unsigned int *frag_size)
6679 {
6680         struct tg3_rx_buffer_desc *desc;
6681         struct ring_info *map;
6682         u8 *data;
6683         dma_addr_t mapping;
6684         int skb_size, data_size, dest_idx;
6685
6686         switch (opaque_key) {
6687         case RXD_OPAQUE_RING_STD:
6688                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6689                 desc = &tpr->rx_std[dest_idx];
6690                 map = &tpr->rx_std_buffers[dest_idx];
6691                 data_size = tp->rx_pkt_map_sz;
6692                 break;
6693
6694         case RXD_OPAQUE_RING_JUMBO:
6695                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6696                 desc = &tpr->rx_jmb[dest_idx].std;
6697                 map = &tpr->rx_jmb_buffers[dest_idx];
6698                 data_size = TG3_RX_JMB_MAP_SZ;
6699                 break;
6700
6701         default:
6702                 return -EINVAL;
6703         }
6704
6705         /* Do not overwrite any of the map or rp information
6706          * until we are sure we can commit to a new buffer.
6707          *
6708          * Callers depend upon this behavior and assume that
6709          * we leave everything unchanged if we fail.
6710          */
6711         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6712                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6713         if (skb_size <= PAGE_SIZE) {
6714                 data = netdev_alloc_frag(skb_size);
6715                 *frag_size = skb_size;
6716         } else {
6717                 data = kmalloc(skb_size, GFP_ATOMIC);
6718                 *frag_size = 0;
6719         }
6720         if (!data)
6721                 return -ENOMEM;
6722
6723         mapping = pci_map_single(tp->pdev,
6724                                  data + TG3_RX_OFFSET(tp),
6725                                  data_size,
6726                                  PCI_DMA_FROMDEVICE);
6727         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6728                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6729                 return -EIO;
6730         }
6731
6732         map->data = data;
6733         dma_unmap_addr_set(map, mapping, mapping);
6734
6735         desc->addr_hi = ((u64)mapping >> 32);
6736         desc->addr_lo = ((u64)mapping & 0xffffffff);
6737
6738         return data_size;
6739 }
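
     /* A sizing sketch for the allocation above, assuming 64-byte cache
      * lines and illustrative values: data_size = 1536 with
      * TG3_RX_OFFSET(tp) = 2 gives SKB_DATA_ALIGN(1538) = 1600 bytes;
      * adding the aligned skb_shared_info (a few hundred bytes) keeps
      * skb_size under PAGE_SIZE, so the page-fragment allocator is used
      * rather than kmalloc().
      */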
6740
6741 /* We only need to move over in the address because the other
6742  * members of the RX descriptor are invariant.  See notes above
6743  * tg3_alloc_rx_data for full details.
6744  */
6745 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6746                            struct tg3_rx_prodring_set *dpr,
6747                            u32 opaque_key, int src_idx,
6748                            u32 dest_idx_unmasked)
6749 {
6750         struct tg3 *tp = tnapi->tp;
6751         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6752         struct ring_info *src_map, *dest_map;
6753         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6754         int dest_idx;
6755
6756         switch (opaque_key) {
6757         case RXD_OPAQUE_RING_STD:
6758                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6759                 dest_desc = &dpr->rx_std[dest_idx];
6760                 dest_map = &dpr->rx_std_buffers[dest_idx];
6761                 src_desc = &spr->rx_std[src_idx];
6762                 src_map = &spr->rx_std_buffers[src_idx];
6763                 break;
6764
6765         case RXD_OPAQUE_RING_JUMBO:
6766                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6767                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6768                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6769                 src_desc = &spr->rx_jmb[src_idx].std;
6770                 src_map = &spr->rx_jmb_buffers[src_idx];
6771                 break;
6772
6773         default:
6774                 return;
6775         }
6776
6777         dest_map->data = src_map->data;
6778         dma_unmap_addr_set(dest_map, mapping,
6779                            dma_unmap_addr(src_map, mapping));
6780         dest_desc->addr_hi = src_desc->addr_hi;
6781         dest_desc->addr_lo = src_desc->addr_lo;
6782
6783         /* Ensure that the update to the data pointer happens after the
6784          * physical addresses have been transferred to the new BD location.
6785          */
6786         smp_wmb();
6787
6788         src_map->data = NULL;
6789 }
6790
6791 /* The RX ring scheme is composed of multiple rings which post fresh
6792  * buffers to the chip, and one special ring the chip uses to report
6793  * status back to the host.
6794  *
6795  * The special ring reports the status of received packets to the
6796  * host.  The chip does not write into the original descriptor the
6797  * RX buffer was obtained from.  The chip simply takes the original
6798  * descriptor as provided by the host, updates the status and length
6799  * field, then writes this into the next status ring entry.
6800  *
6801  * Each ring the host uses to post buffers to the chip is described
6802  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6803  * it is first placed into the on-chip RAM.  When the packet's length
6804  * is known, the chip walks down the TG3_BDINFO entries to select the
6805  * ring.  The first TG3_BDINFO whose MAXLEN field covers the new
6806  * packet's length is chosen.
6807  *
6808  * The "separate ring for rx status" scheme may sound odd, but it makes
6809  * sense from a cache coherency perspective.  If only the host writes
6810  * to the buffer post rings, and only the chip writes to the rx status
6811  * rings, then cache lines never move beyond shared-modified state.
6812  * If both the host and chip were to write into the same ring, cache line
6813  * eviction could occur since both entities want it in an exclusive state.
6814  */
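     /* For example, if the standard ring's TG3_BDINFO advertised a MAXLEN
      * of 1536 and the jumbo ring's advertised 9018 (illustrative values),
      * a 4000-byte frame would fail the first test and be placed via the
      * jumbo ring's descriptors.
      */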
6815 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6816 {
6817         struct tg3 *tp = tnapi->tp;
6818         u32 work_mask, rx_std_posted = 0;
6819         u32 std_prod_idx, jmb_prod_idx;
6820         u32 sw_idx = tnapi->rx_rcb_ptr;
6821         u16 hw_idx;
6822         int received;
6823         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6824
6825         hw_idx = *(tnapi->rx_rcb_prod_idx);
6826         /*
6827          * We need to order the read of hw_idx and the read of
6828          * the opaque cookie.
6829          */
6830         rmb();
6831         work_mask = 0;
6832         received = 0;
6833         std_prod_idx = tpr->rx_std_prod_idx;
6834         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6835         while (sw_idx != hw_idx && budget > 0) {
6836                 struct ring_info *ri;
6837                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6838                 unsigned int len;
6839                 struct sk_buff *skb;
6840                 dma_addr_t dma_addr;
6841                 u32 opaque_key, desc_idx, *post_ptr;
6842                 u8 *data;
6843                 u64 tstamp = 0;
6844
6845                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6846                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6847                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6848                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6849                         dma_addr = dma_unmap_addr(ri, mapping);
6850                         data = ri->data;
6851                         post_ptr = &std_prod_idx;
6852                         rx_std_posted++;
6853                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6854                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6855                         dma_addr = dma_unmap_addr(ri, mapping);
6856                         data = ri->data;
6857                         post_ptr = &jmb_prod_idx;
6858                 } else
6859                         goto next_pkt_nopost;
6860
6861                 work_mask |= opaque_key;
6862
6863                 if (desc->err_vlan & RXD_ERR_MASK) {
6864                 drop_it:
6865                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6866                                        desc_idx, *post_ptr);
6867                 drop_it_no_recycle:
6868                         /* Other statistics are tracked by the card. */
6869                         tp->rx_dropped++;
6870                         goto next_pkt;
6871                 }
6872
6873                 prefetch(data + TG3_RX_OFFSET(tp));
6874                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6875                       ETH_FCS_LEN;
6876
6877                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6878                      RXD_FLAG_PTPSTAT_PTPV1 ||
6879                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6880                      RXD_FLAG_PTPSTAT_PTPV2) {
6881                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6882                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6883                 }
6884
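                      /* Packets above the copy threshold keep their DMA
                      * buffer, which is handed to build_skb() below while a
                      * replacement is posted; smaller packets are copied into
                      * a fresh skb so the original buffer can be recycled in
                      * place.
                      */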
6885                 if (len > TG3_RX_COPY_THRESH(tp)) {
6886                         int skb_size;
6887                         unsigned int frag_size;
6888
6889                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6890                                                     *post_ptr, &frag_size);
6891                         if (skb_size < 0)
6892                                 goto drop_it;
6893
6894                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6895                                          PCI_DMA_FROMDEVICE);
6896
6897                         /* Ensure that the update to the data happens
6898                          * after the usage of the old DMA mapping.
6899                          */
6900                         smp_wmb();
6901
6902                         ri->data = NULL;
6903
6904                         skb = build_skb(data, frag_size);
6905                         if (!skb) {
6906                                 tg3_frag_free(frag_size != 0, data);
6907                                 goto drop_it_no_recycle;
6908                         }
6909                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6910                 } else {
6911                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6912                                        desc_idx, *post_ptr);
6913
6914                         skb = netdev_alloc_skb(tp->dev,
6915                                                len + TG3_RAW_IP_ALIGN);
6916                         if (skb == NULL)
6917                                 goto drop_it_no_recycle;
6918
6919                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6920                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6921                         memcpy(skb->data,
6922                                data + TG3_RX_OFFSET(tp),
6923                                len);
6924                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6925                 }
6926
6927                 skb_put(skb, len);
6928                 if (tstamp)
6929                         tg3_hwclock_to_timestamp(tp, tstamp,
6930                                                  skb_hwtstamps(skb));
6931
6932                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6933                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6934                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6935                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6936                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6937                 else
6938                         skb_checksum_none_assert(skb);
6939
6940                 skb->protocol = eth_type_trans(skb, tp->dev);
6941
6942                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6943                     skb->protocol != htons(ETH_P_8021Q) &&
6944                     skb->protocol != htons(ETH_P_8021AD)) {
6945                         dev_kfree_skb_any(skb);
6946                         goto drop_it_no_recycle;
6947                 }
6948
6949                 if (desc->type_flags & RXD_FLAG_VLAN &&
6950                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6951                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6952                                                desc->err_vlan & RXD_VLAN_MASK);
6953
6954                 napi_gro_receive(&tnapi->napi, skb);
6955
6956                 received++;
6957                 budget--;
6958
6959 next_pkt:
6960                 (*post_ptr)++;
6961
6962                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6963                         tpr->rx_std_prod_idx = std_prod_idx &
6964                                                tp->rx_std_ring_mask;
6965                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6966                                      tpr->rx_std_prod_idx);
6967                         work_mask &= ~RXD_OPAQUE_RING_STD;
6968                         rx_std_posted = 0;
6969                 }
6970 next_pkt_nopost:
6971                 sw_idx++;
6972                 sw_idx &= tp->rx_ret_ring_mask;
6973
6974                 /* Refresh hw_idx to see if there is new work */
6975                 if (sw_idx == hw_idx) {
6976                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6977                         rmb();
6978                 }
6979         }
6980
6981         /* ACK the status ring. */
6982         tnapi->rx_rcb_ptr = sw_idx;
6983         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6984
6985         /* Refill RX ring(s). */
6986         if (!tg3_flag(tp, ENABLE_RSS)) {
6987                 /* Sync BD data before updating mailbox */
6988                 wmb();
6989
6990                 if (work_mask & RXD_OPAQUE_RING_STD) {
6991                         tpr->rx_std_prod_idx = std_prod_idx &
6992                                                tp->rx_std_ring_mask;
6993                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6994                                      tpr->rx_std_prod_idx);
6995                 }
6996                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6997                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6998                                                tp->rx_jmb_ring_mask;
6999                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7000                                      tpr->rx_jmb_prod_idx);
7001                 }
7002                 mmiowb();
7003         } else if (work_mask) {
7004                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7005                  * updated before the producer indices can be updated.
7006                  */
7007                 smp_wmb();
7008
7009                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7010                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7011
7012                 if (tnapi != &tp->napi[1]) {
7013                         tp->rx_refill = true;
7014                         napi_schedule(&tp->napi[1].napi);
7015                 }
7016         }
7017
7018         return received;
7019 }
7020
7021 static void tg3_poll_link(struct tg3 *tp)
7022 {
7023         /* handle link change and other phy events */
7024         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7025                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7026
7027                 if (sblk->status & SD_STATUS_LINK_CHG) {
7028                         sblk->status = SD_STATUS_UPDATED |
7029                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7030                         spin_lock(&tp->lock);
7031                         if (tg3_flag(tp, USE_PHYLIB)) {
7032                                 tw32_f(MAC_STATUS,
7033                                      (MAC_STATUS_SYNC_CHANGED |
7034                                       MAC_STATUS_CFG_CHANGED |
7035                                       MAC_STATUS_MI_COMPLETION |
7036                                       MAC_STATUS_LNKSTATE_CHANGED));
7037                                 udelay(40);
7038                         } else
7039                                 tg3_setup_phy(tp, false);
7040                         spin_unlock(&tp->lock);
7041                 }
7042         }
7043 }
7044
7045 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7046                                 struct tg3_rx_prodring_set *dpr,
7047                                 struct tg3_rx_prodring_set *spr)
7048 {
7049         u32 si, di, cpycnt, src_prod_idx;
7050         int i, err = 0;
7051
7052         while (1) {
7053                 src_prod_idx = spr->rx_std_prod_idx;
7054
7055                 /* Make sure updates to the rx_std_buffers[] entries and the
7056                  * standard producer index are seen in the correct order.
7057                  */
7058                 smp_rmb();
7059
7060                 if (spr->rx_std_cons_idx == src_prod_idx)
7061                         break;
7062
7063                 if (spr->rx_std_cons_idx < src_prod_idx)
7064                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7065                 else
7066                         cpycnt = tp->rx_std_ring_mask + 1 -
7067                                  spr->rx_std_cons_idx;
7068
7069                 cpycnt = min(cpycnt,
7070                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
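
                      /* e.g. with a 512-entry ring (mask 511), cons_idx = 500
                      * and prod_idx = 10 after a wrap: cpycnt = 512 - 500 = 12,
                      * so this pass copies only up to the end of the ring; the
                      * wrapped remainder is handled on the next loop iteration.
                      */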
7071
7072                 si = spr->rx_std_cons_idx;
7073                 di = dpr->rx_std_prod_idx;
7074
7075                 for (i = di; i < di + cpycnt; i++) {
7076                         if (dpr->rx_std_buffers[i].data) {
7077                                 cpycnt = i - di;
7078                                 err = -ENOSPC;
7079                                 break;
7080                         }
7081                 }
7082
7083                 if (!cpycnt)
7084                         break;
7085
7086                 /* Ensure that updates to the rx_std_buffers ring and the
7087                  * shadowed hardware producer ring from tg3_recycle_skb() are
7088                  * ordered correctly WRT the skb check above.
7089                  */
7090                 smp_rmb();
7091
7092                 memcpy(&dpr->rx_std_buffers[di],
7093                        &spr->rx_std_buffers[si],
7094                        cpycnt * sizeof(struct ring_info));
7095
7096                 for (i = 0; i < cpycnt; i++, di++, si++) {
7097                         struct tg3_rx_buffer_desc *sbd, *dbd;
7098                         sbd = &spr->rx_std[si];
7099                         dbd = &dpr->rx_std[di];
7100                         dbd->addr_hi = sbd->addr_hi;
7101                         dbd->addr_lo = sbd->addr_lo;
7102                 }
7103
7104                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7105                                        tp->rx_std_ring_mask;
7106                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7107                                        tp->rx_std_ring_mask;
7108         }
7109
7110         while (1) {
7111                 src_prod_idx = spr->rx_jmb_prod_idx;
7112
7113                 /* Make sure updates to the rx_jmb_buffers[] entries and
7114                  * the jumbo producer index are seen in the correct order.
7115                  */
7116                 smp_rmb();
7117
7118                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7119                         break;
7120
7121                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7122                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7123                 else
7124                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7125                                  spr->rx_jmb_cons_idx;
7126
7127                 cpycnt = min(cpycnt,
7128                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7129
7130                 si = spr->rx_jmb_cons_idx;
7131                 di = dpr->rx_jmb_prod_idx;
7132
7133                 for (i = di; i < di + cpycnt; i++) {
7134                         if (dpr->rx_jmb_buffers[i].data) {
7135                                 cpycnt = i - di;
7136                                 err = -ENOSPC;
7137                                 break;
7138                         }
7139                 }
7140
7141                 if (!cpycnt)
7142                         break;
7143
7144                 /* Ensure that updates to the rx_jmb_buffers ring and the
7145                  * shadowed hardware producer ring from tg3_recycle_skb() are
7146                  * ordered correctly WRT the skb check above.
7147                  */
7148                 smp_rmb();
7149
7150                 memcpy(&dpr->rx_jmb_buffers[di],
7151                        &spr->rx_jmb_buffers[si],
7152                        cpycnt * sizeof(struct ring_info));
7153
7154                 for (i = 0; i < cpycnt; i++, di++, si++) {
7155                         struct tg3_rx_buffer_desc *sbd, *dbd;
7156                         sbd = &spr->rx_jmb[si].std;
7157                         dbd = &dpr->rx_jmb[di].std;
7158                         dbd->addr_hi = sbd->addr_hi;
7159                         dbd->addr_lo = sbd->addr_lo;
7160                 }
7161
7162                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7163                                        tp->rx_jmb_ring_mask;
7164                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7165                                        tp->rx_jmb_ring_mask;
7166         }
7167
7168         return err;
7169 }
7170
7171 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7172 {
7173         struct tg3 *tp = tnapi->tp;
7174
7175         /* run TX completion thread */
7176         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7177                 tg3_tx(tnapi);
7178                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7179                         return work_done;
7180         }
7181
7182         if (!tnapi->rx_rcb_prod_idx)
7183                 return work_done;
7184
7185         /* Run the RX thread within the bounds set by NAPI.
7186          * All RX "locking" is done by ensuring outside
7187          * code synchronizes with tg3->napi.poll()
7188          */
7189         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7190                 work_done += tg3_rx(tnapi, budget - work_done);
7191
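        /* With RSS, only vector 1 replenishes the producer ring the
         * hardware consumes: gather buffers recycled by all rx vectors
         * into napi[0]'s prodring, ring the mailboxes if either
         * producer index moved, and nudge the coalescing engine if a
         * transfer came up short.
         */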
7192         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7193                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7194                 int i, err = 0;
7195                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7196                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7197
7198                 tp->rx_refill = false;
7199                 for (i = 1; i <= tp->rxq_cnt; i++)
7200                         err |= tg3_rx_prodring_xfer(tp, dpr,
7201                                                     &tp->napi[i].prodring);
7202
7203                 wmb();
7204
7205                 if (std_prod_idx != dpr->rx_std_prod_idx)
7206                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7207                                      dpr->rx_std_prod_idx);
7208
7209                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7210                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7211                                      dpr->rx_jmb_prod_idx);
7212
7213                 mmiowb();
7214
7215                 if (err)
7216                         tw32_f(HOSTCC_MODE, tp->coal_now);
7217         }
7218
7219         return work_done;
7220 }
7221
7222 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7223 {
7224         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7225                 schedule_work(&tp->reset_task);
7226 }
7227
7228 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7229 {
7230         cancel_work_sync(&tp->reset_task);
7231         tg3_flag_clear(tp, RESET_TASK_PENDING);
7232         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7233 }
7234
7235 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7236 {
7237         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7238         struct tg3 *tp = tnapi->tp;
7239         int work_done = 0;
7240         struct tg3_hw_status *sblk = tnapi->hw_status;
7241
7242         while (1) {
7243                 work_done = tg3_poll_work(tnapi, work_done, budget);
7244
7245                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7246                         goto tx_recovery;
7247
7248                 if (unlikely(work_done >= budget))
7249                         break;
7250
7251                 /* tnapi->last_tag is written to the interrupt mailbox
7252                  * below to tell the hw how much work has been processed,
7253                  * so we must read it before checking for more work.
7254                  */
7255                 tnapi->last_tag = sblk->status_tag;
7256                 tnapi->last_irq_tag = tnapi->last_tag;
7257                 rmb();
7258
7259                 /* check for RX/TX work to do */
7260                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7261                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7262
7263                         /* This test is not race free, but looping
7264                          * again reduces the number of interrupts.
7265                          */
7266                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7267                                 continue;
7268
7269                         napi_complete_done(napi, work_done);
7270                         /* Reenable interrupts. */
7271                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7272
7273                         /* This test is synchronized by napi_schedule()
7274                          * and napi_complete() to close the race condition.
7275                          */
7276                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7277                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7278                                                   HOSTCC_MODE_ENABLE |
7279                                                   tnapi->coal_now);
7280                         }
7281                         mmiowb();
7282                         break;
7283                 }
7284         }
7285
7286         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7287         return work_done;
7288
7289 tx_recovery:
7290         /* work_done is guaranteed to be less than budget. */
7291         napi_complete(napi);
7292         tg3_reset_task_schedule(tp);
7293         return work_done;
7294 }
7295
7296 static void tg3_process_error(struct tg3 *tp)
7297 {
7298         u32 val;
7299         bool real_error = false;
7300
7301         if (tg3_flag(tp, ERROR_PROCESSED))
7302                 return;
7303
7304         /* Check Flow Attention register */
7305         val = tr32(HOSTCC_FLOW_ATTN);
7306         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7307                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7308                 real_error = true;
7309         }
7310
7311         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7312                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7313                 real_error = true;
7314         }
7315
7316         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7317                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7318                 real_error = true;
7319         }
7320
7321         if (!real_error)
7322                 return;
7323
7324         tg3_dump_state(tp);
7325
7326         tg3_flag_set(tp, ERROR_PROCESSED);
7327         tg3_reset_task_schedule(tp);
7328 }
7329
7330 static int tg3_poll(struct napi_struct *napi, int budget)
7331 {
7332         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7333         struct tg3 *tp = tnapi->tp;
7334         int work_done = 0;
7335         struct tg3_hw_status *sblk = tnapi->hw_status;
7336
7337         while (1) {
7338                 if (sblk->status & SD_STATUS_ERROR)
7339                         tg3_process_error(tp);
7340
7341                 tg3_poll_link(tp);
7342
7343                 work_done = tg3_poll_work(tnapi, work_done, budget);
7344
7345                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7346                         goto tx_recovery;
7347
7348                 if (unlikely(work_done >= budget))
7349                         break;
7350
7351                 if (tg3_flag(tp, TAGGED_STATUS)) {
7352                         /* tnapi->last_tag is used in tg3_int_reenable() below
7353                          * to tell the hw how much work has been processed,
7354                          * so we must read it before checking for more work.
7355                          */
7356                         tnapi->last_tag = sblk->status_tag;
7357                         tnapi->last_irq_tag = tnapi->last_tag;
7358                         rmb();
7359                 } else
7360                         sblk->status &= ~SD_STATUS_UPDATED;
7361
7362                 if (likely(!tg3_has_work(tnapi))) {
7363                         napi_complete_done(napi, work_done);
7364                         tg3_int_reenable(tnapi);
7365                         break;
7366                 }
7367         }
7368
7369         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7370         return work_done;
7371
7372 tx_recovery:
7373         /* work_done is guaranteed to be less than budget. */
7374         napi_complete(napi);
7375         tg3_reset_task_schedule(tp);
7376         return work_done;
7377 }
7378
7379 static void tg3_napi_disable(struct tg3 *tp)
7380 {
7381         int i;
7382
7383         for (i = tp->irq_cnt - 1; i >= 0; i--)
7384                 napi_disable(&tp->napi[i].napi);
7385 }
7386
7387 static void tg3_napi_enable(struct tg3 *tp)
7388 {
7389         int i;
7390
7391         for (i = 0; i < tp->irq_cnt; i++)
7392                 napi_enable(&tp->napi[i].napi);
7393 }
7394
7395 static void tg3_napi_init(struct tg3 *tp)
7396 {
7397         int i;
7398
7399         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7400         for (i = 1; i < tp->irq_cnt; i++)
7401                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7402 }
7403
7404 static void tg3_napi_fini(struct tg3 *tp)
7405 {
7406         int i;
7407
7408         for (i = 0; i < tp->irq_cnt; i++)
7409                 netif_napi_del(&tp->napi[i].napi);
7410 }
7411
7412 static inline void tg3_netif_stop(struct tg3 *tp)
7413 {
7414         netif_trans_update(tp->dev);    /* prevent tx timeout */
7415         tg3_napi_disable(tp);
7416         netif_carrier_off(tp->dev);
7417         netif_tx_disable(tp->dev);
7418 }
7419
7420 /* tp->lock must be held */
7421 static inline void tg3_netif_start(struct tg3 *tp)
7422 {
7423         tg3_ptp_resume(tp);
7424
7425         /* NOTE: unconditional netif_tx_wake_all_queues is only
7426          * appropriate so long as all callers are assured to
7427          * have free tx slots (such as after tg3_init_hw)
7428          */
7429         netif_tx_wake_all_queues(tp->dev);
7430
7431         if (tp->link_up)
7432                 netif_carrier_on(tp->dev);
7433
7434         tg3_napi_enable(tp);
7435         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7436         tg3_enable_ints(tp);
7437 }
7438
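/* Wait for any in-flight interrupt handlers to finish.  tp->lock is
 * dropped around synchronize_irq(), which may block; setting irq_sync
 * first makes tg3_irq_sync() callers bail out rather than schedule
 * NAPI in the meantime.
 */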
7439 static void tg3_irq_quiesce(struct tg3 *tp)
7440         __releases(tp->lock)
7441         __acquires(tp->lock)
7442 {
7443         int i;
7444
7445         BUG_ON(tp->irq_sync);
7446
7447         tp->irq_sync = 1;
7448         smp_mb();
7449
7450         spin_unlock_bh(&tp->lock);
7451
7452         for (i = 0; i < tp->irq_cnt; i++)
7453                 synchronize_irq(tp->napi[i].irq_vec);
7454
7455         spin_lock_bh(&tp->lock);
7456 }
7457
7458 /* Fully shut down all tg3 driver activity elsewhere in the system.
7459  * If irq_sync is non-zero, the IRQ handlers must be quiesced as
7460  * well.  Most of the time this is not necessary, except when
7461  * shutting down the device.
7462  */
7463 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7464 {
7465         spin_lock_bh(&tp->lock);
7466         if (irq_sync)
7467                 tg3_irq_quiesce(tp);
7468 }
7469
7470 static inline void tg3_full_unlock(struct tg3 *tp)
7471 {
7472         spin_unlock_bh(&tp->lock);
7473 }
7474
7475 /* One-shot MSI handler - the chip automatically disables the
7476  * interrupt after sending the MSI, so the driver doesn't have to.
7477  */
7478 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7479 {
7480         struct tg3_napi *tnapi = dev_id;
7481         struct tg3 *tp = tnapi->tp;
7482
7483         prefetch(tnapi->hw_status);
7484         if (tnapi->rx_rcb)
7485                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7486
7487         if (likely(!tg3_irq_sync(tp)))
7488                 napi_schedule(&tnapi->napi);
7489
7490         return IRQ_HANDLED;
7491 }
7492
7493 /* MSI ISR - No need to check for interrupt sharing and no need to
7494  * flush status block and interrupt mailbox. PCI ordering rules
7495  * guarantee that MSI will arrive after the status block.
7496  */
7497 static irqreturn_t tg3_msi(int irq, void *dev_id)
7498 {
7499         struct tg3_napi *tnapi = dev_id;
7500         struct tg3 *tp = tnapi->tp;
7501
7502         prefetch(tnapi->hw_status);
7503         if (tnapi->rx_rcb)
7504                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7505         /*
7506          * Writing any value to intr-mbox-0 clears PCI INTA# and
7507          * chip-internal interrupt pending events.
7508          * Writing non-zero to intr-mbox-0 additionally tells the
7509          * NIC to stop sending us irqs, engaging "in-intr-handler"
7510          * event coalescing.
7511          */
7512         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7513         if (likely(!tg3_irq_sync(tp)))
7514                 napi_schedule(&tnapi->napi);
7515
7516         return IRQ_RETVAL(1);
7517 }
7518
7519 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7520 {
7521         struct tg3_napi *tnapi = dev_id;
7522         struct tg3 *tp = tnapi->tp;
7523         struct tg3_hw_status *sblk = tnapi->hw_status;
7524         unsigned int handled = 1;
7525
7526         /* In INTx mode, it is possible for the interrupt to arrive at
7527          * the CPU before the status block, posted prior to the interrupt,
7528          * is visible.  Reading the PCI State register will confirm whether
7529          * the interrupt is ours and will flush the status block.
7530          */
7531         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7532                 if (tg3_flag(tp, CHIP_RESETTING) ||
7533                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7534                         handled = 0;
7535                         goto out;
7536                 }
7537         }
7538
7539         /*
7540          * Writing any value to intr-mbox-0 clears PCI INTA# and
7541          * chip-internal interrupt pending events.
7542          * Writing non-zero to intr-mbox-0 additionally tells the
7543          * NIC to stop sending us irqs, engaging "in-intr-handler"
7544          * event coalescing.
7545          *
7546          * Flush the mailbox to de-assert the IRQ immediately to prevent
7547          * spurious interrupts.  The flush impacts performance but
7548          * excessive spurious interrupts can be worse in some cases.
7549          */
7550         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7551         if (tg3_irq_sync(tp))
7552                 goto out;
7553         sblk->status &= ~SD_STATUS_UPDATED;
7554         if (likely(tg3_has_work(tnapi))) {
7555                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7556                 napi_schedule(&tnapi->napi);
7557         } else {
7558                 /* No work, shared interrupt perhaps?  re-enable
7559                  * interrupts, and flush that PCI write
7560                  */
7561                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7562                                0x00000000);
7563         }
7564 out:
7565         return IRQ_RETVAL(handled);
7566 }
7567
7568 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7569 {
7570         struct tg3_napi *tnapi = dev_id;
7571         struct tg3 *tp = tnapi->tp;
7572         struct tg3_hw_status *sblk = tnapi->hw_status;
7573         unsigned int handled = 1;
7574
7575         /* In INTx mode, it is possible for the interrupt to arrive at
7576          * the CPU before the status block, posted prior to the interrupt,
7577          * is visible.  Reading the PCI State register will confirm whether
7578          * the interrupt is ours and will flush the status block.
7579          */
7580         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7581                 if (tg3_flag(tp, CHIP_RESETTING) ||
7582                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7583                         handled = 0;
7584                         goto out;
7585                 }
7586         }
7587
7588         /*
7589          * Writing any value to intr-mbox-0 clears PCI INTA# and
7590          * chip-internal interrupt pending events.
7591          * Writing non-zero to intr-mbox-0 additionally tells the
7592          * NIC to stop sending us irqs, engaging "in-intr-handler"
7593          * event coalescing.
7594          *
7595          * Flush the mailbox to de-assert the IRQ immediately to prevent
7596          * spurious interrupts.  The flush impacts performance but
7597          * excessive spurious interrupts can be worse in some cases.
7598          */
7599         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7600
7601         /*
7602          * In a shared interrupt configuration, sometimes other devices'
7603          * interrupts will scream.  We record the current status tag here
7604          * so that the above check can report that the screaming interrupts
7605          * are unhandled.  Eventually they will be silenced.
7606          */
7607         tnapi->last_irq_tag = sblk->status_tag;
7608
7609         if (tg3_irq_sync(tp))
7610                 goto out;
7611
7612         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7613
7614         napi_schedule(&tnapi->napi);
7615
7616 out:
7617         return IRQ_RETVAL(handled);
7618 }
7619
7620 /* ISR for interrupt test */
7621 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7622 {
7623         struct tg3_napi *tnapi = dev_id;
7624         struct tg3 *tp = tnapi->tp;
7625         struct tg3_hw_status *sblk = tnapi->hw_status;
7626
7627         if ((sblk->status & SD_STATUS_UPDATED) ||
7628             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7629                 tg3_disable_ints(tp);
7630                 return IRQ_RETVAL(1);
7631         }
7632         return IRQ_RETVAL(0);
7633 }
7634
7635 #ifdef CONFIG_NET_POLL_CONTROLLER
7636 static void tg3_poll_controller(struct net_device *dev)
7637 {
7638         int i;
7639         struct tg3 *tp = netdev_priv(dev);
7640
7641         if (tg3_irq_sync(tp))
7642                 return;
7643
7644         for (i = 0; i < tp->irq_cnt; i++)
7645                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7646 }
7647 #endif
7648
7649 static void tg3_tx_timeout(struct net_device *dev)
7650 {
7651         struct tg3 *tp = netdev_priv(dev);
7652
7653         if (netif_msg_tx_err(tp)) {
7654                 netdev_err(dev, "transmit timed out, resetting\n");
7655                 tg3_dump_state(tp);
7656         }
7657
7658         tg3_reset_task_schedule(tp);
7659 }
7660
7661 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
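/* A worked example, assuming 32-bit truncation of the mapping: with
 * base = 0xfffffff0 and len = 0x100, base + len + 8 = 0x1000000f8,
 * which truncates to 0xf8 < base, so the test fires.  The extra 8
 * bytes presumably gives the DMA engine a small guard band.
 */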
7662 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7663 {
7664         u32 base = (u32) mapping & 0xffffffff;
7665
7666         return base + len + 8 < base;
7667 }
7668
7669 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7670  * of any 4GB boundaries: 4G, 8G, etc.
7671  */
7672 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7673                                            u32 len, u32 mss)
7674 {
7675         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7676                 u32 base = (u32) mapping & 0xffffffff;
7677
7678                 return ((base + len + (mss & 0x3fff)) < base);
7679         }
7680         return 0;
7681 }
7682
7683 /* Test for DMA addresses > 40-bit */
7684 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7685                                           int len)
7686 {
7687 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7688         if (tg3_flag(tp, 40BIT_DMA_BUG))
7689                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7690         return 0;
7691 #else
7692         return 0;
7693 #endif
7694 }
7695
7696 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7697                                  dma_addr_t mapping, u32 len, u32 flags,
7698                                  u32 mss, u32 vlan)
7699 {
7700         txbd->addr_hi = ((u64) mapping >> 32);
7701         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7702         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7703         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7704 }
7705
7706 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7707                             dma_addr_t map, u32 len, u32 flags,
7708                             u32 mss, u32 vlan)
7709 {
7710         struct tg3 *tp = tnapi->tp;
7711         bool hwbug = false;
7712
7713         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7714                 hwbug = true;
7715
7716         if (tg3_4g_overflow_test(map, len))
7717                 hwbug = true;
7718
7719         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7720                 hwbug = true;
7721
7722         if (tg3_40bit_overflow_test(tp, map, len))
7723                 hwbug = true;
7724
7725         if (tp->dma_limit) {
7726                 u32 prvidx = *entry;
7727                 u32 tmp_flag = flags & ~TXD_FLAG_END;
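                /* Split a mapping larger than dma_limit into
                 * dma_limit-sized BDs; intermediate pieces drop
                 * TXD_FLAG_END so only the final BD terminates the
                 * packet.  If the tail would be 8 bytes or less (the
                 * short-DMA erratum), split the last full chunk in
                 * half so the remainder stays above 8 bytes.
                 */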
7728                 while (len > tp->dma_limit && *budget) {
7729                         u32 frag_len = tp->dma_limit;
7730                         len -= tp->dma_limit;
7731
7732                         /* Avoid the 8-byte DMA problem */
7733                         if (len <= 8) {
7734                                 len += tp->dma_limit / 2;
7735                                 frag_len = tp->dma_limit / 2;
7736                         }
7737
7738                         tnapi->tx_buffers[*entry].fragmented = true;
7739
7740                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7741                                       frag_len, tmp_flag, mss, vlan);
7742                         *budget -= 1;
7743                         prvidx = *entry;
7744                         *entry = NEXT_TX(*entry);
7745
7746                         map += frag_len;
7747                 }
7748
7749                 if (len) {
7750                         if (*budget) {
7751                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7752                                               len, flags, mss, vlan);
7753                                 *budget -= 1;
7754                                 *entry = NEXT_TX(*entry);
7755                         } else {
7756                                 hwbug = true;
7757                                 tnapi->tx_buffers[prvidx].fragmented = false;
7758                         }
7759                 }
7760         } else {
7761                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7762                               len, flags, mss, vlan);
7763                 *entry = NEXT_TX(*entry);
7764         }
7765
7766         return hwbug;
7767 }
7768
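/* Unmap the skb stored at tx ring slot 'entry': first the linear head,
 * then 'last' + 1 page fragments, skipping over the extra BDs that
 * tg3_tx_frag_set() may have generated (marked by the fragmented
 * flag).
 */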
7769 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7770 {
7771         int i;
7772         struct sk_buff *skb;
7773         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7774
7775         skb = txb->skb;
7776         txb->skb = NULL;
7777
7778         pci_unmap_single(tnapi->tp->pdev,
7779                          dma_unmap_addr(txb, mapping),
7780                          skb_headlen(skb),
7781                          PCI_DMA_TODEVICE);
7782
7783         while (txb->fragmented) {
7784                 txb->fragmented = false;
7785                 entry = NEXT_TX(entry);
7786                 txb = &tnapi->tx_buffers[entry];
7787         }
7788
7789         for (i = 0; i <= last; i++) {
7790                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7791
7792                 entry = NEXT_TX(entry);
7793                 txb = &tnapi->tx_buffers[entry];
7794
7795                 pci_unmap_page(tnapi->tp->pdev,
7796                                dma_unmap_addr(txb, mapping),
7797                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7798
7799                 while (txb->fragmented) {
7800                         txb->fragmented = false;
7801                         entry = NEXT_TX(entry);
7802                         txb = &tnapi->tx_buffers[entry];
7803                 }
7804         }
7805 }
7806
7807 /* Workaround 4GB and 40-bit hardware DMA bugs. */
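/* The copy below lands in a fresh linear allocation that, with luck,
 * avoids the offending boundary; on the 5701 the skb is also expanded
 * so the data can start on a 4-byte-aligned address.
 */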
7808 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7809                                        struct sk_buff **pskb,
7810                                        u32 *entry, u32 *budget,
7811                                        u32 base_flags, u32 mss, u32 vlan)
7812 {
7813         struct tg3 *tp = tnapi->tp;
7814         struct sk_buff *new_skb, *skb = *pskb;
7815         dma_addr_t new_addr = 0;
7816         int ret = 0;
7817
7818         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7819                 new_skb = skb_copy(skb, GFP_ATOMIC);
7820         else {
7821                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7822
7823                 new_skb = skb_copy_expand(skb,
7824                                           skb_headroom(skb) + more_headroom,
7825                                           skb_tailroom(skb), GFP_ATOMIC);
7826         }
7827
7828         if (!new_skb) {
7829                 ret = -1;
7830         } else {
7831                 /* New SKB is guaranteed to be linear. */
7832                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7833                                           PCI_DMA_TODEVICE);
7834                 /* Make sure the mapping succeeded */
7835                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7836                         dev_kfree_skb_any(new_skb);
7837                         ret = -1;
7838                 } else {
7839                         u32 save_entry = *entry;
7840
7841                         base_flags |= TXD_FLAG_END;
7842
7843                         tnapi->tx_buffers[*entry].skb = new_skb;
7844                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7845                                            mapping, new_addr);
7846
7847                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7848                                             new_skb->len, base_flags,
7849                                             mss, vlan)) {
7850                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7851                                 dev_kfree_skb_any(new_skb);
7852                                 ret = -1;
7853                         }
7854                 }
7855         }
7856
7857         dev_consume_skb_any(skb);
7858         *pskb = new_skb;
7859         return ret;
7860 }
7861
7862 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7863 {
7864         /* Check if we will never have enough descriptors,
7865          * as gso_segs can exceed the current ring size.
7866          */
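        /* The divisor mirrors frag_cnt_est in tg3_tso_bug(): each
         * segment is assumed to need up to three descriptors in the
         * worst case.
         */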
7867         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7868 }
7869
7870 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7871
7872 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7873  * indicated in tg3_tx_frag_set()
7874  */
7875 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7876                        struct netdev_queue *txq, struct sk_buff *skb)
7877 {
7878         struct sk_buff *segs, *nskb;
7879         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7880
7881         /* Estimate the number of fragments in the worst case */
7882         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7883                 netif_tx_stop_queue(txq);
7884
7885                 /* netif_tx_stop_queue() must be done before checking
7886                  * the tx index in tg3_tx_avail() below, because in
7887                  * tg3_tx(), we update tx index before checking for
7888                  * netif_tx_queue_stopped().
7889                  */
7890                 smp_mb();
7891                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7892                         return NETDEV_TX_BUSY;
7893
7894                 netif_tx_wake_queue(txq);
7895         }
7896
7897         segs = skb_gso_segment(skb, tp->dev->features &
7898                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7899         if (IS_ERR(segs) || !segs)
7900                 goto tg3_tso_bug_end;
7901
7902         do {
7903                 nskb = segs;
7904                 segs = segs->next;
7905                 nskb->next = NULL;
7906                 tg3_start_xmit(nskb, tp->dev);
7907         } while (segs);
7908
7909 tg3_tso_bug_end:
7910         dev_consume_skb_any(skb);
7911
7912         return NETDEV_TX_OK;
7913 }
7914
7915 /* hard_start_xmit for all devices */
7916 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7917 {
7918         struct tg3 *tp = netdev_priv(dev);
7919         u32 len, entry, base_flags, mss, vlan = 0;
7920         u32 budget;
7921         int i = -1, would_hit_hwbug;
7922         dma_addr_t mapping;
7923         struct tg3_napi *tnapi;
7924         struct netdev_queue *txq;
7925         unsigned int last;
7926         struct iphdr *iph = NULL;
7927         struct tcphdr *tcph = NULL;
7928         __sum16 tcp_csum = 0, ip_csum = 0;
7929         __be16 ip_tot_len = 0;
7930
7931         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7932         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
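        /* With TSS, the tx rings start at napi[1], so shift by one;
         * napi[0] then carries no tx ring.
         */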
7933         if (tg3_flag(tp, ENABLE_TSS))
7934                 tnapi++;
7935
7936         budget = tg3_tx_avail(tnapi);
7937
7938         /* We are running in BH disabled context with netif_tx_lock
7939          * and TX reclaim runs via tp->napi.poll inside of a software
7940          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7941          * no IRQ context deadlocks to worry about either.  Rejoice!
7942          */
7943         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7944                 if (!netif_tx_queue_stopped(txq)) {
7945                         netif_tx_stop_queue(txq);
7946
7947                         /* This is a hard error, log it. */
7948                         netdev_err(dev,
7949                                    "BUG! Tx Ring full when queue awake!\n");
7950                 }
7951                 return NETDEV_TX_BUSY;
7952         }
7953
7954         entry = tnapi->tx_prod;
7955         base_flags = 0;
7956
7957         mss = skb_shinfo(skb)->gso_size;
7958         if (mss) {
7959                 u32 tcp_opt_len, hdr_len;
7960
7961                 if (skb_cow_head(skb, 0))
7962                         goto drop;
7963
7964                 iph = ip_hdr(skb);
7965                 tcp_opt_len = tcp_optlen(skb);
7966
7967                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7968
7969                 /* HW/FW cannot correctly segment packets that have been
7970                  * vlan encapsulated.
7971                  */
7972                 if (skb->protocol == htons(ETH_P_8021Q) ||
7973                     skb->protocol == htons(ETH_P_8021AD)) {
7974                         if (tg3_tso_bug_gso_check(tnapi, skb))
7975                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7976                         goto drop;
7977                 }
7978
7979                 if (!skb_is_gso_v6(skb)) {
7980                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7981                             tg3_flag(tp, TSO_BUG)) {
7982                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7983                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7984                                 goto drop;
7985                         }
7986                         ip_csum = iph->check;
7987                         ip_tot_len = iph->tot_len;
7988                         iph->check = 0;
7989                         iph->tot_len = htons(mss + hdr_len);
7990                 }
7991
7992                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7993                                TXD_FLAG_CPU_POST_DMA);
7994
7995                 tcph = tcp_hdr(skb);
7996                 tcp_csum = tcph->check;
7997
7998                 if (tg3_flag(tp, HW_TSO_1) ||
7999                     tg3_flag(tp, HW_TSO_2) ||
8000                     tg3_flag(tp, HW_TSO_3)) {
8001                         tcph->check = 0;
8002                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8003                 } else {
8004                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8005                                                          0, IPPROTO_TCP, 0);
8006                 }
8007
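                /* Fold the header length into spare descriptor bits;
                 * the encoding differs per TSO generation.  A sketch of
                 * the HW_TSO_3 case: hdr_len bits 2-3 land in mss bits
                 * 14-15, bit 4 in base_flags bit 4, and bits 5-9 in
                 * base_flags bits 10-14.
                 */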
8008                 if (tg3_flag(tp, HW_TSO_3)) {
8009                         mss |= (hdr_len & 0xc) << 12;
8010                         if (hdr_len & 0x10)
8011                                 base_flags |= 0x00000010;
8012                         base_flags |= (hdr_len & 0x3e0) << 5;
8013                 } else if (tg3_flag(tp, HW_TSO_2))
8014                         mss |= hdr_len << 9;
8015                 else if (tg3_flag(tp, HW_TSO_1) ||
8016                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8017                         if (tcp_opt_len || iph->ihl > 5) {
8018                                 int tsflags;
8019
8020                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8021                                 mss |= (tsflags << 11);
8022                         }
8023                 } else {
8024                         if (tcp_opt_len || iph->ihl > 5) {
8025                                 int tsflags;
8026
8027                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8028                                 base_flags |= tsflags << 12;
8029                         }
8030                 }
8031         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8032                 /* HW/FW cannot correctly checksum packets that have been
8033                  * vlan encapsulated.
8034                  */
8035                 if (skb->protocol == htons(ETH_P_8021Q) ||
8036                     skb->protocol == htons(ETH_P_8021AD)) {
8037                         if (skb_checksum_help(skb))
8038                                 goto drop;
8039                 } else {
8040                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8041                 }
8042         }
8043
8044         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8045             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8046                 base_flags |= TXD_FLAG_JMB_PKT;
8047
8048         if (skb_vlan_tag_present(skb)) {
8049                 base_flags |= TXD_FLAG_VLAN;
8050                 vlan = skb_vlan_tag_get(skb);
8051         }
8052
8053         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8054             tg3_flag(tp, TX_TSTAMP_EN)) {
8055                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8056                 base_flags |= TXD_FLAG_HWTSTAMP;
8057         }
8058
8059         len = skb_headlen(skb);
8060
8061         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8062         if (pci_dma_mapping_error(tp->pdev, mapping))
8063                 goto drop;
8064
8065
8066         tnapi->tx_buffers[entry].skb = skb;
8067         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8068
8069         would_hit_hwbug = 0;
8070
8071         if (tg3_flag(tp, 5701_DMA_BUG))
8072                 would_hit_hwbug = 1;
8073
8074         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8075                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8076                             mss, vlan)) {
8077                 would_hit_hwbug = 1;
8078         } else if (skb_shinfo(skb)->nr_frags > 0) {
8079                 u32 tmp_mss = mss;
8080
8081                 if (!tg3_flag(tp, HW_TSO_1) &&
8082                     !tg3_flag(tp, HW_TSO_2) &&
8083                     !tg3_flag(tp, HW_TSO_3))
8084                         tmp_mss = 0;
8085
8086                 /* Now loop through additional data
8087                  * fragments, and queue them.
8088                  */
8089                 last = skb_shinfo(skb)->nr_frags - 1;
8090                 for (i = 0; i <= last; i++) {
8091                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8092
8093                         len = skb_frag_size(frag);
8094                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8095                                                    len, DMA_TO_DEVICE);
8096
8097                         tnapi->tx_buffers[entry].skb = NULL;
8098                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8099                                            mapping);
8100                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8101                                 goto dma_error;
8102
8103                         if (!budget ||
8104                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8105                                             len, base_flags |
8106                                             ((i == last) ? TXD_FLAG_END : 0),
8107                                             tmp_mss, vlan)) {
8108                                 would_hit_hwbug = 1;
8109                                 break;
8110                         }
8111                 }
8112         }
8113
8114         if (would_hit_hwbug) {
8115                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8116
8117                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8118                         /* If it's a TSO packet, do GSO instead of
8119                          * allocating and copying to a large linear SKB
8120                          */
8121                         if (ip_tot_len) {
8122                                 iph->check = ip_csum;
8123                                 iph->tot_len = ip_tot_len;
8124                         }
8125                         tcph->check = tcp_csum;
8126                         return tg3_tso_bug(tp, tnapi, txq, skb);
8127                 }
8128
8129                 /* If the workaround fails due to memory/mapping
8130                  * failure, silently drop this packet.
8131                  */
8132                 entry = tnapi->tx_prod;
8133                 budget = tg3_tx_avail(tnapi);
8134                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8135                                                 base_flags, mss, vlan))
8136                         goto drop_nofree;
8137         }
8138
8139         skb_tx_timestamp(skb);
8140         netdev_tx_sent_queue(txq, skb->len);
8141
8142         /* Sync BD data before updating mailbox */
8143         wmb();
8144
8145         tnapi->tx_prod = entry;
8146         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8147                 netif_tx_stop_queue(txq);
8148
8149                 /* netif_tx_stop_queue() must be done before checking
8150                  * the tx index in tg3_tx_avail() below, because in
8151                  * tg3_tx(), we update tx index before checking for
8152                  * netif_tx_queue_stopped().
8153                  */
8154                 smp_mb();
8155                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8156                         netif_tx_wake_queue(txq);
8157         }
8158
8159         if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8160                 /* Packets are ready, update Tx producer idx on card. */
8161                 tw32_tx_mbox(tnapi->prodmbox, entry);
8162                 mmiowb();
8163         }
8164
8165         return NETDEV_TX_OK;
8166
8167 dma_error:
8168         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8169         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8170 drop:
8171         dev_kfree_skb_any(skb);
8172 drop_nofree:
8173         tp->tx_dropped++;
8174         return NETDEV_TX_OK;
8175 }
8176
8177 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8178 {
8179         if (enable) {
8180                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8181                                   MAC_MODE_PORT_MODE_MASK);
8182
8183                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8184
8185                 if (!tg3_flag(tp, 5705_PLUS))
8186                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8187
8188                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8189                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8190                 else
8191                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8192         } else {
8193                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8194
8195                 if (tg3_flag(tp, 5705_PLUS) ||
8196                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8197                     tg3_asic_rev(tp) == ASIC_REV_5700)
8198                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8199         }
8200
8201         tw32(MAC_MODE, tp->mac_mode);
8202         udelay(40);
8203 }
8204
8205 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8206 {
8207         u32 val, bmcr, mac_mode, ptest = 0;
8208
8209         tg3_phy_toggle_apd(tp, false);
8210         tg3_phy_toggle_automdix(tp, false);
8211
8212         if (extlpbk && tg3_phy_set_extloopbk(tp))
8213                 return -EIO;
8214
8215         bmcr = BMCR_FULLDPLX;
8216         switch (speed) {
8217         case SPEED_10:
8218                 break;
8219         case SPEED_100:
8220                 bmcr |= BMCR_SPEED100;
8221                 break;
8222         case SPEED_1000:
8223         default:
8224                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8225                         speed = SPEED_100;
8226                         bmcr |= BMCR_SPEED100;
8227                 } else {
8228                         speed = SPEED_1000;
8229                         bmcr |= BMCR_SPEED1000;
8230                 }
8231         }
8232
8233         if (extlpbk) {
8234                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8235                         tg3_readphy(tp, MII_CTRL1000, &val);
8236                         val |= CTL1000_AS_MASTER |
8237                                CTL1000_ENABLE_MASTER;
8238                         tg3_writephy(tp, MII_CTRL1000, val);
8239                 } else {
8240                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8241                                 MII_TG3_FET_PTEST_TRIM_2;
8242                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8243                 }
8244         } else
8245                 bmcr |= BMCR_LOOPBACK;
8246
8247         tg3_writephy(tp, MII_BMCR, bmcr);
8248
8249         /* The write needs to be flushed for the FETs */
8250         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8251                 tg3_readphy(tp, MII_BMCR, &bmcr);
8252
8253         udelay(40);
8254
8255         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8256             tg3_asic_rev(tp) == ASIC_REV_5785) {
8257                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8258                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8259                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8260
8261                 /* The write needs to be flushed for the AC131 */
8262                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8263         }
8264
8265         /* Reset to prevent losing 1st rx packet intermittently */
8266         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8267             tg3_flag(tp, 5780_CLASS)) {
8268                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8269                 udelay(10);
8270                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8271         }
8272
8273         mac_mode = tp->mac_mode &
8274                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8275         if (speed == SPEED_1000)
8276                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8277         else
8278                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8279
8280         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8281                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8282
8283                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8284                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8285                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8286                         mac_mode |= MAC_MODE_LINK_POLARITY;
8287
8288                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8289                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8290         }
8291
8292         tw32(MAC_MODE, mac_mode);
8293         udelay(40);
8294
8295         return 0;
8296 }
8297
8298 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8299 {
8300         struct tg3 *tp = netdev_priv(dev);
8301
8302         if (features & NETIF_F_LOOPBACK) {
8303                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8304                         return;
8305
8306                 spin_lock_bh(&tp->lock);
8307                 tg3_mac_loopback(tp, true);
8308                 netif_carrier_on(tp->dev);
8309                 spin_unlock_bh(&tp->lock);
8310                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8311         } else {
8312                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8313                         return;
8314
8315                 spin_lock_bh(&tp->lock);
8316                 tg3_mac_loopback(tp, false);
8317                 /* Force link status check */
8318                 tg3_setup_phy(tp, true);
8319                 spin_unlock_bh(&tp->lock);
8320                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8321         }
8322 }
8323
8324 static netdev_features_t tg3_fix_features(struct net_device *dev,
8325         netdev_features_t features)
8326 {
8327         struct tg3 *tp = netdev_priv(dev);
8328
8329         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8330                 features &= ~NETIF_F_ALL_TSO;
8331
8332         return features;
8333 }
8334
8335 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8336 {
8337         netdev_features_t changed = dev->features ^ features;
8338
8339         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8340                 tg3_set_loopback(dev, features);
8341
8342         return 0;
8343 }
8344
8345 static void tg3_rx_prodring_free(struct tg3 *tp,
8346                                  struct tg3_rx_prodring_set *tpr)
8347 {
8348         int i;
8349
8350         if (tpr != &tp->napi[0].prodring) {
8351                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8352                      i = (i + 1) & tp->rx_std_ring_mask)
8353                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8354                                         tp->rx_pkt_map_sz);
8355
8356                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8357                         for (i = tpr->rx_jmb_cons_idx;
8358                              i != tpr->rx_jmb_prod_idx;
8359                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8360                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8361                                                 TG3_RX_JMB_MAP_SZ);
8362                         }
8363                 }
8364
8365                 return;
8366         }
8367
8368         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8369                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8370                                 tp->rx_pkt_map_sz);
8371
8372         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8373                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8374                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8375                                         TG3_RX_JMB_MAP_SZ);
8376         }
8377 }
8378
8379 /* Initialize rx rings for packet processing.
8380  *
8381  * The chip has been shut down and the driver detached from
8382  * the networking stack, so no interrupts or new tx packets will
8383  * end up in the driver.  tp->{tx,}lock are held and thus
8384  * we may not sleep.
8385  */
8386 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8387                                  struct tg3_rx_prodring_set *tpr)
8388 {
8389         u32 i, rx_pkt_dma_sz;
8390
8391         tpr->rx_std_cons_idx = 0;
8392         tpr->rx_std_prod_idx = 0;
8393         tpr->rx_jmb_cons_idx = 0;
8394         tpr->rx_jmb_prod_idx = 0;
8395
8396         if (tpr != &tp->napi[0].prodring) {
8397                 memset(&tpr->rx_std_buffers[0], 0,
8398                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8399                 if (tpr->rx_jmb_buffers)
8400                         memset(&tpr->rx_jmb_buffers[0], 0,
8401                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8402                 goto done;
8403         }
8404
8405         /* Zero out all descriptors. */
8406         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8407
8408         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8409         if (tg3_flag(tp, 5780_CLASS) &&
8410             tp->dev->mtu > ETH_DATA_LEN)
8411                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8412         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8413
8414         /* Initialize invariants of the rings; we only set this
8415          * stuff once.  This works because the card does not
8416          * write into the rx buffer posting rings.
8417          */
8418         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8419                 struct tg3_rx_buffer_desc *rxd;
8420
8421                 rxd = &tpr->rx_std[i];
8422                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8423                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8424                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8425                                (i << RXD_OPAQUE_INDEX_SHIFT));
8426         }
8427
8428         /* Now allocate fresh SKBs for each rx ring. */
8429         for (i = 0; i < tp->rx_pending; i++) {
8430                 unsigned int frag_size;
8431
8432                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8433                                       &frag_size) < 0) {
8434                         netdev_warn(tp->dev,
8435                                     "Using a smaller RX standard ring. Only "
8436                                     "%d out of %d buffers were allocated "
8437                                     "successfully\n", i, tp->rx_pending);
8438                         if (i == 0)
8439                                 goto initfail;
8440                         tp->rx_pending = i;
8441                         break;
8442                 }
8443         }
8444
8445         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8446                 goto done;
8447
8448         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8449
8450         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8451                 goto done;
8452
8453         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8454                 struct tg3_rx_buffer_desc *rxd;
8455
8456                 rxd = &tpr->rx_jmb[i].std;
8457                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8458                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8459                                   RXD_FLAG_JUMBO;
8460                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8461                        (i << RXD_OPAQUE_INDEX_SHIFT));
8462         }
8463
8464         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8465                 unsigned int frag_size;
8466
8467                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8468                                       &frag_size) < 0) {
8469                         netdev_warn(tp->dev,
8470                                     "Using a smaller RX jumbo ring. Only %d "
8471                                     "out of %d buffers were allocated "
8472                                     "successfully\n", i, tp->rx_jumbo_pending);
8473                         if (i == 0)
8474                                 goto initfail;
8475                         tp->rx_jumbo_pending = i;
8476                         break;
8477                 }
8478         }
8479
8480 done:
8481         return 0;
8482
8483 initfail:
8484         tg3_rx_prodring_free(tp, tpr);
8485         return -ENOMEM;
8486 }
8487
8488 static void tg3_rx_prodring_fini(struct tg3 *tp,
8489                                  struct tg3_rx_prodring_set *tpr)
8490 {
8491         kfree(tpr->rx_std_buffers);
8492         tpr->rx_std_buffers = NULL;
8493         kfree(tpr->rx_jmb_buffers);
8494         tpr->rx_jmb_buffers = NULL;
8495         if (tpr->rx_std) {
8496                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8497                                   tpr->rx_std, tpr->rx_std_mapping);
8498                 tpr->rx_std = NULL;
8499         }
8500         if (tpr->rx_jmb) {
8501                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8502                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8503                 tpr->rx_jmb = NULL;
8504         }
8505 }
8506
8507 static int tg3_rx_prodring_init(struct tg3 *tp,
8508                                 struct tg3_rx_prodring_set *tpr)
8509 {
8510         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8511                                       GFP_KERNEL);
8512         if (!tpr->rx_std_buffers)
8513                 return -ENOMEM;
8514
8515         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8516                                          TG3_RX_STD_RING_BYTES(tp),
8517                                          &tpr->rx_std_mapping,
8518                                          GFP_KERNEL);
8519         if (!tpr->rx_std)
8520                 goto err_out;
8521
8522         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8523                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8524                                               GFP_KERNEL);
8525                 if (!tpr->rx_jmb_buffers)
8526                         goto err_out;
8527
8528                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8529                                                  TG3_RX_JMB_RING_BYTES(tp),
8530                                                  &tpr->rx_jmb_mapping,
8531                                                  GFP_KERNEL);
8532                 if (!tpr->rx_jmb)
8533                         goto err_out;
8534         }
8535
8536         return 0;
8537
8538 err_out:
8539         tg3_rx_prodring_fini(tp, tpr);
8540         return -ENOMEM;
8541 }
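
/* Taken together, the helpers above give a producer ring set this
 * lifecycle:
 *
 *      tg3_rx_prodring_init()  - allocate ring and bookkeeping memory
 *      tg3_rx_prodring_alloc() - write rx descriptors and post buffers
 *      tg3_rx_prodring_free()  - reclaim the posted buffers
 *      tg3_rx_prodring_fini()  - release ring and bookkeeping memory
 *
 * The alloc/free pair is driven by tg3_init_rings()/tg3_free_rings()
 * below; the init/fini pair by tg3_mem_rx_acquire()/tg3_mem_rx_release().
 */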
8542
8543 /* Free up pending packets in all rx/tx rings.
8544  *
8545  * The chip has been shut down and the driver detached from
8546  * the network stack, so no interrupts or new tx packets will
8547  * end up in the driver.  tp->{tx,}lock is not held and we are not
8548  * in an interrupt context and thus may sleep.
8549  */
8550 static void tg3_free_rings(struct tg3 *tp)
8551 {
8552         int i, j;
8553
8554         for (j = 0; j < tp->irq_cnt; j++) {
8555                 struct tg3_napi *tnapi = &tp->napi[j];
8556
8557                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8558
8559                 if (!tnapi->tx_buffers)
8560                         continue;
8561
8562                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8563                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8564
8565                         if (!skb)
8566                                 continue;
8567
8568                         tg3_tx_skb_unmap(tnapi, i,
8569                                          skb_shinfo(skb)->nr_frags - 1);
8570
8571                         dev_consume_skb_any(skb);
8572                 }
8573                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8574         }
8575 }
8576
8577 /* Initialize tx/rx rings for packet processing.
8578  *
8579  * The chip has been shut down and the driver detached from
8580  * the network stack, so no interrupts or new tx packets will
8581  * end up in the driver.  tp->{tx,}lock are held and thus
8582  * we may not sleep.
8583  */
8584 static int tg3_init_rings(struct tg3 *tp)
8585 {
8586         int i;
8587
8588         /* Free up all the SKBs. */
8589         tg3_free_rings(tp);
8590
8591         for (i = 0; i < tp->irq_cnt; i++) {
8592                 struct tg3_napi *tnapi = &tp->napi[i];
8593
8594                 tnapi->last_tag = 0;
8595                 tnapi->last_irq_tag = 0;
8596                 tnapi->hw_status->status = 0;
8597                 tnapi->hw_status->status_tag = 0;
8598                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8599
8600                 tnapi->tx_prod = 0;
8601                 tnapi->tx_cons = 0;
8602                 if (tnapi->tx_ring)
8603                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8604
8605                 tnapi->rx_rcb_ptr = 0;
8606                 if (tnapi->rx_rcb)
8607                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8608
8609                 if (tnapi->prodring.rx_std &&
8610                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8611                         tg3_free_rings(tp);
8612                         return -ENOMEM;
8613                 }
8614         }
8615
8616         return 0;
8617 }
8618
8619 static void tg3_mem_tx_release(struct tg3 *tp)
8620 {
8621         int i;
8622
8623         for (i = 0; i < tp->irq_max; i++) {
8624                 struct tg3_napi *tnapi = &tp->napi[i];
8625
8626                 if (tnapi->tx_ring) {
8627                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8628                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8629                         tnapi->tx_ring = NULL;
8630                 }
8631
8632                 kfree(tnapi->tx_buffers);
8633                 tnapi->tx_buffers = NULL;
8634         }
8635 }
8636
8637 static int tg3_mem_tx_acquire(struct tg3 *tp)
8638 {
8639         int i;
8640         struct tg3_napi *tnapi = &tp->napi[0];
8641
8642         /* If multivector TSS is enabled, vector 0 does not handle
8643          * tx interrupts.  Don't allocate any resources for it.
8644          */
8645         if (tg3_flag(tp, ENABLE_TSS))
8646                 tnapi++;
8647
8648         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8649                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8650                                             sizeof(struct tg3_tx_ring_info),
8651                                             GFP_KERNEL);
8652                 if (!tnapi->tx_buffers)
8653                         goto err_out;
8654
8655                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8656                                                     TG3_TX_RING_BYTES,
8657                                                     &tnapi->tx_desc_mapping,
8658                                                     GFP_KERNEL);
8659                 if (!tnapi->tx_ring)
8660                         goto err_out;
8661         }
8662
8663         return 0;
8664
8665 err_out:
8666         tg3_mem_tx_release(tp);
8667         return -ENOMEM;
8668 }
8669
8670 static void tg3_mem_rx_release(struct tg3 *tp)
8671 {
8672         int i;
8673
8674         for (i = 0; i < tp->irq_max; i++) {
8675                 struct tg3_napi *tnapi = &tp->napi[i];
8676
8677                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8678
8679                 if (!tnapi->rx_rcb)
8680                         continue;
8681
8682                 dma_free_coherent(&tp->pdev->dev,
8683                                   TG3_RX_RCB_RING_BYTES(tp),
8684                                   tnapi->rx_rcb,
8685                                   tnapi->rx_rcb_mapping);
8686                 tnapi->rx_rcb = NULL;
8687         }
8688 }
8689
8690 static int tg3_mem_rx_acquire(struct tg3 *tp)
8691 {
8692         unsigned int i, limit;
8693
8694         limit = tp->rxq_cnt;
8695
8696         /* If RSS is enabled, we need a (dummy) producer ring
8697          * set on vector zero.  This is the true hw prodring.
8698          */
8699         if (tg3_flag(tp, ENABLE_RSS))
8700                 limit++;
8701
8702         for (i = 0; i < limit; i++) {
8703                 struct tg3_napi *tnapi = &tp->napi[i];
8704
8705                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8706                         goto err_out;
8707
8708                 /* If multivector RSS is enabled, vector 0
8709                  * does not handle rx or tx interrupts.
8710                  * Don't allocate any resources for it.
8711                  */
8712                 if (!i && tg3_flag(tp, ENABLE_RSS))
8713                         continue;
8714
8715                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8716                                                    TG3_RX_RCB_RING_BYTES(tp),
8717                                                    &tnapi->rx_rcb_mapping,
8718                                                    GFP_KERNEL);
8719                 if (!tnapi->rx_rcb)
8720                         goto err_out;
8721         }
8722
8723         return 0;
8724
8725 err_out:
8726         tg3_mem_rx_release(tp);
8727         return -ENOMEM;
8728 }
8729
8730 /*
8731  * Must only be invoked with the chip's interrupt sources disabled
8732  * and the hardware shut down.
8733  */
8734 static void tg3_free_consistent(struct tg3 *tp)
8735 {
8736         int i;
8737
8738         for (i = 0; i < tp->irq_cnt; i++) {
8739                 struct tg3_napi *tnapi = &tp->napi[i];
8740
8741                 if (tnapi->hw_status) {
8742                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8743                                           tnapi->hw_status,
8744                                           tnapi->status_mapping);
8745                         tnapi->hw_status = NULL;
8746                 }
8747         }
8748
8749         tg3_mem_rx_release(tp);
8750         tg3_mem_tx_release(tp);
8751
8752         /* tp->hw_stats can be referenced safely:
8753          *     1. under rtnl_lock
8754          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8755          */
8756         if (tp->hw_stats) {
8757                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8758                                   tp->hw_stats, tp->stats_mapping);
8759                 tp->hw_stats = NULL;
8760         }
8761 }
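
/* A reader honoring the tp->hw_stats locking rule above might look like
 * this (illustrative sketch only, not code from this driver):
 *
 *      struct tg3_hw_stats snap;
 *
 *      spin_lock_bh(&tp->lock);
 *      if (tg3_flag(tp, INIT_COMPLETE) && tp->hw_stats)
 *              memcpy(&snap, tp->hw_stats, sizeof(snap));
 *      spin_unlock_bh(&tp->lock);
 */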
8762
8763 /*
8764  * Must only be invoked with the chip's interrupt sources disabled
8765  * and the hardware shut down.  Can sleep.
8766  */
8767 static int tg3_alloc_consistent(struct tg3 *tp)
8768 {
8769         int i;
8770
8771         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8772                                           sizeof(struct tg3_hw_stats),
8773                                           &tp->stats_mapping, GFP_KERNEL);
8774         if (!tp->hw_stats)
8775                 goto err_out;
8776
8777         for (i = 0; i < tp->irq_cnt; i++) {
8778                 struct tg3_napi *tnapi = &tp->napi[i];
8779                 struct tg3_hw_status *sblk;
8780
8781                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8782                                                       TG3_HW_STATUS_SIZE,
8783                                                       &tnapi->status_mapping,
8784                                                       GFP_KERNEL);
8785                 if (!tnapi->hw_status)
8786                         goto err_out;
8787
8788                 sblk = tnapi->hw_status;
8789
8790                 if (tg3_flag(tp, ENABLE_RSS)) {
8791                         u16 *prodptr = NULL;
8792
8793                         /*
8794                          * When RSS is enabled, the status block format changes
8795                          * slightly.  The "rx_jumbo_consumer", "reserved",
8796                          * and "rx_mini_consumer" members get mapped to the
8797                          * other three rx return ring producer indexes.
8798                          */
8799                         switch (i) {
8800                         case 1:
8801                                 prodptr = &sblk->idx[0].rx_producer;
8802                                 break;
8803                         case 2:
8804                                 prodptr = &sblk->rx_jumbo_consumer;
8805                                 break;
8806                         case 3:
8807                                 prodptr = &sblk->reserved;
8808                                 break;
8809                         case 4:
8810                                 prodptr = &sblk->rx_mini_consumer;
8811                                 break;
8812                         }
8813                         tnapi->rx_rcb_prod_idx = prodptr;
8814                 } else {
8815                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8816                 }
8817         }
8818
8819         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8820                 goto err_out;
8821
8822         return 0;
8823
8824 err_out:
8825         tg3_free_consistent(tp);
8826         return -ENOMEM;
8827 }
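
/* tg3_free_consistent() tolerates a partially built state, which is why
 * tg3_alloc_consistent() simply calls it on its error path.  A caller
 * would pair them roughly as follows (illustrative sketch only):
 *
 *      err = tg3_alloc_consistent(tp);
 *      if (err)
 *              return err;
 *      ...
 *      tg3_free_consistent(tp);
 */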
8828
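/* 1000 polls at 100 us each bounds the waits below to roughly 100 ms. */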
8829 #define MAX_WAIT_CNT 1000
8830
8831 /* To stop a block, clear the enable bit and poll until it
8832  * clears.  tp->lock is held.
8833  */
8834 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8835 {
8836         unsigned int i;
8837         u32 val;
8838
8839         if (tg3_flag(tp, 5705_PLUS)) {
8840                 switch (ofs) {
8841                 case RCVLSC_MODE:
8842                 case DMAC_MODE:
8843                 case MBFREE_MODE:
8844                 case BUFMGR_MODE:
8845                 case MEMARB_MODE:
8846                         /* We can't enable/disable these bits on the
8847                          * 5705/5750; just report success.
8848                          */
8849                         return 0;
8850
8851                 default:
8852                         break;
8853                 }
8854         }
8855
8856         val = tr32(ofs);
8857         val &= ~enable_bit;
8858         tw32_f(ofs, val);
8859
8860         for (i = 0; i < MAX_WAIT_CNT; i++) {
8861                 if (pci_channel_offline(tp->pdev)) {
8862                         dev_err(&tp->pdev->dev,
8863                                 "tg3_stop_block device offline, "
8864                                 "ofs=%lx enable_bit=%x\n",
8865                                 ofs, enable_bit);
8866                         return -ENODEV;
8867                 }
8868
8869                 udelay(100);
8870                 val = tr32(ofs);
8871                 if ((val & enable_bit) == 0)
8872                         break;
8873         }
8874
8875         if (i == MAX_WAIT_CNT && !silent) {
8876                 dev_err(&tp->pdev->dev,
8877                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8878                         ofs, enable_bit);
8879                 return -ENODEV;
8880         }
8881
8882         return 0;
8883 }
8884
8885 /* tp->lock is held. */
8886 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8887 {
8888         int i, err;
8889
8890         tg3_disable_ints(tp);
8891
8892         if (pci_channel_offline(tp->pdev)) {
8893                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8894                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8895                 err = -ENODEV;
8896                 goto err_no_dev;
8897         }
8898
8899         tp->rx_mode &= ~RX_MODE_ENABLE;
8900         tw32_f(MAC_RX_MODE, tp->rx_mode);
8901         udelay(10);
8902
8903         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8904         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8905         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8906         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8907         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8908         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8909
8910         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8911         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8912         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8913         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8914         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8915         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8916         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8917
8918         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8919         tw32_f(MAC_MODE, tp->mac_mode);
8920         udelay(40);
8921
8922         tp->tx_mode &= ~TX_MODE_ENABLE;
8923         tw32_f(MAC_TX_MODE, tp->tx_mode);
8924
8925         for (i = 0; i < MAX_WAIT_CNT; i++) {
8926                 udelay(100);
8927                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8928                         break;
8929         }
8930         if (i >= MAX_WAIT_CNT) {
8931                 dev_err(&tp->pdev->dev,
8932                         "%s timed out, TX_MODE_ENABLE will not clear "
8933                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8934                 err |= -ENODEV;
8935         }
8936
8937         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8938         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8939         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8940
8941         tw32(FTQ_RESET, 0xffffffff);
8942         tw32(FTQ_RESET, 0x00000000);
8943
8944         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8945         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8946
8947 err_no_dev:
8948         for (i = 0; i < tp->irq_cnt; i++) {
8949                 struct tg3_napi *tnapi = &tp->napi[i];
8950                 if (tnapi->hw_status)
8951                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8952         }
8953
8954         return err;
8955 }
8956
8957 /* Save PCI command register before chip reset */
8958 static void tg3_save_pci_state(struct tg3 *tp)
8959 {
8960         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8961 }
8962
8963 /* Restore PCI state after chip reset */
8964 static void tg3_restore_pci_state(struct tg3 *tp)
8965 {
8966         u32 val;
8967
8968         /* Re-enable indirect register accesses. */
8969         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8970                                tp->misc_host_ctrl);
8971
8972         /* Set MAX PCI retry to zero. */
8973         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8974         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8975             tg3_flag(tp, PCIX_MODE))
8976                 val |= PCISTATE_RETRY_SAME_DMA;
8977         /* Allow reads and writes to the APE register and memory space. */
8978         if (tg3_flag(tp, ENABLE_APE))
8979                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8980                        PCISTATE_ALLOW_APE_SHMEM_WR |
8981                        PCISTATE_ALLOW_APE_PSPACE_WR;
8982         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8983
8984         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8985
8986         if (!tg3_flag(tp, PCI_EXPRESS)) {
8987                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8988                                       tp->pci_cacheline_sz);
8989                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8990                                       tp->pci_lat_timer);
8991         }
8992
8993         /* Make sure PCI-X relaxed ordering bit is clear. */
8994         if (tg3_flag(tp, PCIX_MODE)) {
8995                 u16 pcix_cmd;
8996
8997                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8998                                      &pcix_cmd);
8999                 pcix_cmd &= ~PCI_X_CMD_ERO;
9000                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9001                                       pcix_cmd);
9002         }
9003
9004         if (tg3_flag(tp, 5780_CLASS)) {
9005
9006                 /* Chip reset on 5780 will reset the MSI enable bit,
9007                  * so we need to restore it.
9008                  */
9009                 if (tg3_flag(tp, USING_MSI)) {
9010                         u16 ctrl;
9011
9012                         pci_read_config_word(tp->pdev,
9013                                              tp->msi_cap + PCI_MSI_FLAGS,
9014                                              &ctrl);
9015                         pci_write_config_word(tp->pdev,
9016                                               tp->msi_cap + PCI_MSI_FLAGS,
9017                                               ctrl | PCI_MSI_FLAGS_ENABLE);
9018                         val = tr32(MSGINT_MODE);
9019                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9020                 }
9021         }
9022 }
9023
9024 static void tg3_override_clk(struct tg3 *tp)
9025 {
9026         u32 val;
9027
9028         switch (tg3_asic_rev(tp)) {
9029         case ASIC_REV_5717:
9030                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9031                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9032                      TG3_CPMU_MAC_ORIDE_ENABLE);
9033                 break;
9034
9035         case ASIC_REV_5719:
9036         case ASIC_REV_5720:
9037                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9038                 break;
9039
9040         default:
9041                 return;
9042         }
9043 }
9044
9045 static void tg3_restore_clk(struct tg3 *tp)
9046 {
9047         u32 val;
9048
9049         switch (tg3_asic_rev(tp)) {
9050         case ASIC_REV_5717:
9051                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9052                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9053                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9054                 break;
9055
9056         case ASIC_REV_5719:
9057         case ASIC_REV_5720:
9058                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9059                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9060                 break;
9061
9062         default:
9063                 return;
9064         }
9065 }
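
/* tg3_override_clk() and tg3_restore_clk() bracket the chip reset in
 * tg3_chip_reset() below; see the comment there for why the core clock
 * is temporarily overridden.
 */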
9066
9067 /* tp->lock is held. */
9068 static int tg3_chip_reset(struct tg3 *tp)
9069         __releases(tp->lock)
9070         __acquires(tp->lock)
9071 {
9072         u32 val;
9073         void (*write_op)(struct tg3 *, u32, u32);
9074         int i, err;
9075
9076         if (!pci_device_is_present(tp->pdev))
9077                 return -ENODEV;
9078
9079         tg3_nvram_lock(tp);
9080
9081         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9082
9083         /* No matching tg3_nvram_unlock() after this because
9084          * chip reset below will undo the nvram lock.
9085          */
9086         tp->nvram_lock_cnt = 0;
9087
9088         /* GRC_MISC_CFG core clock reset will clear the memory
9089          * enable bit in PCI register 4 and the MSI enable bit
9090          * on some chips, so we save relevant registers here.
9091          */
9092         tg3_save_pci_state(tp);
9093
9094         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9095             tg3_flag(tp, 5755_PLUS))
9096                 tw32(GRC_FASTBOOT_PC, 0);
9097
9098         /*
9099          * We must avoid the readl() that normally takes place.
9100          * It locks up machines, causes machine checks, and other
9101          * fun things.  So we temporarily disable the 5701
9102          * hardware workaround while we do the reset.
9103          */
9104         write_op = tp->write32;
9105         if (write_op == tg3_write_flush_reg32)
9106                 tp->write32 = tg3_write32;
9107
9108         /* Prevent the irq handler from reading or writing PCI registers
9109          * during chip reset when the memory enable bit in the PCI command
9110          * register may be cleared.  The chip does not generate interrupts
9111          * at this time, but the irq handler may still be called due to irq
9112          * sharing or irqpoll.
9113          */
9114         tg3_flag_set(tp, CHIP_RESETTING);
9115         for (i = 0; i < tp->irq_cnt; i++) {
9116                 struct tg3_napi *tnapi = &tp->napi[i];
9117                 if (tnapi->hw_status) {
9118                         tnapi->hw_status->status = 0;
9119                         tnapi->hw_status->status_tag = 0;
9120                 }
9121                 tnapi->last_tag = 0;
9122                 tnapi->last_irq_tag = 0;
9123         }
9124         smp_mb();
9125
9126         tg3_full_unlock(tp);
9127
9128         for (i = 0; i < tp->irq_cnt; i++)
9129                 synchronize_irq(tp->napi[i].irq_vec);
9130
9131         tg3_full_lock(tp, 0);
9132
9133         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9134                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9135                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9136         }
9137
9138         /* do the reset */
9139         val = GRC_MISC_CFG_CORECLK_RESET;
9140
9141         if (tg3_flag(tp, PCI_EXPRESS)) {
9142                 /* Force PCIe 1.0a mode */
9143                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9144                     !tg3_flag(tp, 57765_PLUS) &&
9145                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9146                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9147                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9148
9149                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9150                         tw32(GRC_MISC_CFG, (1 << 29));
9151                         val |= (1 << 29);
9152                 }
9153         }
9154
9155         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9156                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9157                 tw32(GRC_VCPU_EXT_CTRL,
9158                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9159         }
9160
9161         /* Set the clock to the highest frequency to avoid timeouts.  With
9162          * link-aware mode, the clock speed could be low and the bootcode
9163          * might not complete within the expected time.  Override the clock
9164          * so the bootcode finishes sooner, then restore it.
9165          */
9166         tg3_override_clk(tp);
9167
9168         /* Manage gphy power for all CPMU-absent PCIe devices. */
9169         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9170                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9171
9172         tw32(GRC_MISC_CFG, val);
9173
9174         /* restore 5701 hardware bug workaround write method */
9175         tp->write32 = write_op;
9176
9177         /* Unfortunately, we have to delay before the PCI read back.
9178          * Some 575X chips will not even respond to a PCI cfg access
9179          * when the reset command is given to the chip.
9180          *
9181          * How do these hardware designers expect things to work
9182          * properly if the PCI write is posted for a long period
9183          * of time?  It is always necessary to have some method by
9184          * which a register read-back can occur to push out the
9185          * write that does the reset.
9186          *
9187          * For most tg3 variants the trick below has worked.
9188          * Ho hum...
9189          */
9190         udelay(120);
9191
9192         /* Flush PCI posted writes.  The normal MMIO registers
9193          * are inaccessible at this time, so this is the only
9194          * way to do this reliably (actually, this is no longer
9195          * the case, see above).  I tried to use indirect
9196          * register read/write but this upset some 5701 variants.
9197          */
9198         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9199
9200         udelay(120);
9201
9202         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9203                 u16 val16;
9204
9205                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9206                         int j;
9207                         u32 cfg_val;
9208
9209                         /* Wait for link training to complete.  */
9210                         for (j = 0; j < 5000; j++)
9211                                 udelay(100);
9212
9213                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9214                         pci_write_config_dword(tp->pdev, 0xc4,
9215                                                cfg_val | (1 << 15));
9216                 }
9217
9218                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9219                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9220                 /*
9221                  * Older PCIe devices only support the 128 byte
9222                  * MPS setting.  Enforce the restriction.
9223                  */
9224                 if (!tg3_flag(tp, CPMU_PRESENT))
9225                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9226                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9227
9228                 /* Clear error status */
9229                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9230                                       PCI_EXP_DEVSTA_CED |
9231                                       PCI_EXP_DEVSTA_NFED |
9232                                       PCI_EXP_DEVSTA_FED |
9233                                       PCI_EXP_DEVSTA_URD);
9234         }
9235
9236         tg3_restore_pci_state(tp);
9237
9238         tg3_flag_clear(tp, CHIP_RESETTING);
9239         tg3_flag_clear(tp, ERROR_PROCESSED);
9240
9241         val = 0;
9242         if (tg3_flag(tp, 5780_CLASS))
9243                 val = tr32(MEMARB_MODE);
9244         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9245
9246         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9247                 tg3_stop_fw(tp);
9248                 tw32(0x5000, 0x400);
9249         }
9250
9251         if (tg3_flag(tp, IS_SSB_CORE)) {
9252                 /*
9253                  * BCM4785: In order to avoid repercussions from using
9254                  * the potentially defective internal ROM, stop the Rx RISC
9255                  * CPU, which is not required for normal operation.
9256                  */
9257                 tg3_stop_fw(tp);
9258                 tg3_halt_cpu(tp, RX_CPU_BASE);
9259         }
9260
9261         err = tg3_poll_fw(tp);
9262         if (err)
9263                 return err;
9264
9265         tw32(GRC_MODE, tp->grc_mode);
9266
9267         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9268                 val = tr32(0xc4);
9269
9270                 tw32(0xc4, val | (1 << 15));
9271         }
9272
9273         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9274             tg3_asic_rev(tp) == ASIC_REV_5705) {
9275                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9276                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9277                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9278                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9279         }
9280
9281         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9282                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9283                 val = tp->mac_mode;
9284         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9285                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9286                 val = tp->mac_mode;
9287         } else
9288                 val = 0;
9289
9290         tw32_f(MAC_MODE, val);
9291         udelay(40);
9292
9293         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9294
9295         tg3_mdio_start(tp);
9296
9297         if (tg3_flag(tp, PCI_EXPRESS) &&
9298             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9299             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9300             !tg3_flag(tp, 57765_PLUS)) {
9301                 val = tr32(0x7c00);
9302
9303                 tw32(0x7c00, val | (1 << 25));
9304         }
9305
9306         tg3_restore_clk(tp);
9307
9308         /* Increase the core clock speed to fix a tx timeout issue on the
9309          * 5762 at 100Mbps link speed.
9310          */
9311         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9312                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9313                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9314                      TG3_CPMU_MAC_ORIDE_ENABLE);
9315         }
9316
9317         /* Reprobe ASF enable state.  */
9318         tg3_flag_clear(tp, ENABLE_ASF);
9319         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9320                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9321
9322         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9323         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9324         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9325                 u32 nic_cfg;
9326
9327                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9328                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9329                         tg3_flag_set(tp, ENABLE_ASF);
9330                         tp->last_event_jiffies = jiffies;
9331                         if (tg3_flag(tp, 5750_PLUS))
9332                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9333
9334                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9335                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9336                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9337                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9338                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9339                 }
9340         }
9341
9342         return 0;
9343 }
9344
9345 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9346 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9347 static void __tg3_set_rx_mode(struct net_device *);
9348
9349 /* tp->lock is held. */
9350 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9351 {
9352         int err;
9353
9354         tg3_stop_fw(tp);
9355
9356         tg3_write_sig_pre_reset(tp, kind);
9357
9358         tg3_abort_hw(tp, silent);
9359         err = tg3_chip_reset(tp);
9360
9361         __tg3_set_mac_addr(tp, false);
9362
9363         tg3_write_sig_legacy(tp, kind);
9364         tg3_write_sig_post_reset(tp, kind);
9365
9366         if (tp->hw_stats) {
9367                 /* Save the stats across chip resets... */
9368                 tg3_get_nstats(tp, &tp->net_stats_prev);
9369                 tg3_get_estats(tp, &tp->estats_prev);
9370
9371                 /* And make sure the next sample is new data */
9372                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9373         }
9374
9375         return err;
9376 }
9377
9378 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9379 {
9380         struct tg3 *tp = netdev_priv(dev);
9381         struct sockaddr *addr = p;
9382         int err = 0;
9383         bool skip_mac_1 = false;
9384
9385         if (!is_valid_ether_addr(addr->sa_data))
9386                 return -EADDRNOTAVAIL;
9387
9388         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9389
9390         if (!netif_running(dev))
9391                 return 0;
9392
9393         if (tg3_flag(tp, ENABLE_ASF)) {
9394                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9395
9396                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9397                 addr0_low = tr32(MAC_ADDR_0_LOW);
9398                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9399                 addr1_low = tr32(MAC_ADDR_1_LOW);
9400
9401                 /* Skip MAC addr 1 if ASF is using it. */
9402                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9403                     !(addr1_high == 0 && addr1_low == 0))
9404                         skip_mac_1 = true;
9405         }
9406         spin_lock_bh(&tp->lock);
9407         __tg3_set_mac_addr(tp, skip_mac_1);
9408         __tg3_set_rx_mode(dev);
9409         spin_unlock_bh(&tp->lock);
9410
9411         return err;
9412 }
9413
9414 /* tp->lock is held. */
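/* Write one BDINFO block into NIC SRAM: the 64-bit host ring address
 * (split into high/low 32-bit halves), the maxlen/flags word and, on
 * pre-5705 chips, the NIC-side ring address.
 */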
9415 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9416                            dma_addr_t mapping, u32 maxlen_flags,
9417                            u32 nic_addr)
9418 {
9419         tg3_write_mem(tp,
9420                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9421                       ((u64) mapping >> 32));
9422         tg3_write_mem(tp,
9423                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9424                       ((u64) mapping & 0xffffffff));
9425         tg3_write_mem(tp,
9426                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9427                        maxlen_flags);
9428
9429         if (!tg3_flag(tp, 5705_PLUS))
9430                 tg3_write_mem(tp,
9431                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9432                               nic_addr);
9433 }
9434
9435
9436 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9437 {
9438         int i = 0;
9439
9440         if (!tg3_flag(tp, ENABLE_TSS)) {
9441                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9442                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9443                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9444         } else {
9445                 tw32(HOSTCC_TXCOL_TICKS, 0);
9446                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9447                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9448
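                /* Per-vector tx coalescing registers are spaced 0x18
                 * bytes apart, starting at the *_VEC1 offsets.
                 */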
9449                 for (; i < tp->txq_cnt; i++) {
9450                         u32 reg;
9451
9452                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9453                         tw32(reg, ec->tx_coalesce_usecs);
9454                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9455                         tw32(reg, ec->tx_max_coalesced_frames);
9456                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9457                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9458                 }
9459         }
9460
9461         for (; i < tp->irq_max - 1; i++) {
9462                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9463                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9464                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9465         }
9466 }
9467
9468 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9469 {
9470         int i = 0;
9471         u32 limit = tp->rxq_cnt;
9472
9473         if (!tg3_flag(tp, ENABLE_RSS)) {
9474                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9475                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9476                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9477                 limit--;
9478         } else {
9479                 tw32(HOSTCC_RXCOL_TICKS, 0);
9480                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9481                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9482         }
9483
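        /* Same 0x18 register stride for the per-vector rx coalescing
         * registers.
         */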
9484         for (; i < limit; i++) {
9485                 u32 reg;
9486
9487                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9488                 tw32(reg, ec->rx_coalesce_usecs);
9489                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9490                 tw32(reg, ec->rx_max_coalesced_frames);
9491                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9492                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9493         }
9494
9495         for (; i < tp->irq_max - 1; i++) {
9496                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9497                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9498                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9499         }
9500 }
9501
9502 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9503 {
9504         tg3_coal_tx_init(tp, ec);
9505         tg3_coal_rx_init(tp, ec);
9506
9507         if (!tg3_flag(tp, 5705_PLUS)) {
9508                 u32 val = ec->stats_block_coalesce_usecs;
9509
9510                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9511                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9512
9513                 if (!tp->link_up)
9514                         val = 0;
9515
9516                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9517         }
9518 }
9519
9520 /* tp->lock is held. */
9521 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9522 {
9523         u32 txrcb, limit;
9524
9525         /* Disable all transmit rings but the first. */
9526         if (!tg3_flag(tp, 5705_PLUS))
9527                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9528         else if (tg3_flag(tp, 5717_PLUS))
9529                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9530         else if (tg3_flag(tp, 57765_CLASS) ||
9531                  tg3_asic_rev(tp) == ASIC_REV_5762)
9532                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9533         else
9534                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9535
9536         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9537              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9538                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9539                               BDINFO_FLAGS_DISABLED);
9540 }
9541
9542 /* tp->lock is held. */
9543 static void tg3_tx_rcbs_init(struct tg3 *tp)
9544 {
9545         int i = 0;
9546         u32 txrcb = NIC_SRAM_SEND_RCB;
9547
9548         if (tg3_flag(tp, ENABLE_TSS))
9549                 i++;
9550
9551         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9552                 struct tg3_napi *tnapi = &tp->napi[i];
9553
9554                 if (!tnapi->tx_ring)
9555                         continue;
9556
9557                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9558                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9559                                NIC_SRAM_TX_BUFFER_DESC);
9560         }
9561 }
9562
9563 /* tp->lock is held. */
9564 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9565 {
9566         u32 rxrcb, limit;
9567
9568         /* Disable all receive return rings but the first. */
9569         if (tg3_flag(tp, 5717_PLUS))
9570                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9571         else if (!tg3_flag(tp, 5705_PLUS))
9572                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9573         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9574                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9575                  tg3_flag(tp, 57765_CLASS))
9576                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9577         else
9578                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9579
9580         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9581              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9582                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9583                               BDINFO_FLAGS_DISABLED);
9584 }
9585
9586 /* tp->lock is held. */
9587 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9588 {
9589         int i = 0;
9590         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9591
9592         if (tg3_flag(tp, ENABLE_RSS))
9593                 i++;
9594
9595         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9596                 struct tg3_napi *tnapi = &tp->napi[i];
9597
9598                 if (!tnapi->rx_rcb)
9599                         continue;
9600
9601                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9602                                (tp->rx_ret_ring_mask + 1) <<
9603                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9604         }
9605 }
9606
9607 /* tp->lock is held. */
9608 static void tg3_rings_reset(struct tg3 *tp)
9609 {
9610         int i;
9611         u32 stblk;
9612         struct tg3_napi *tnapi = &tp->napi[0];
9613
9614         tg3_tx_rcbs_disable(tp);
9615
9616         tg3_rx_ret_rcbs_disable(tp);
9617
9618         /* Disable interrupts */
9619         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9620         tp->napi[0].chk_msi_cnt = 0;
9621         tp->napi[0].last_rx_cons = 0;
9622         tp->napi[0].last_tx_cons = 0;
9623
9624         /* Zero mailbox registers. */
9625         if (tg3_flag(tp, SUPPORT_MSIX)) {
9626                 for (i = 1; i < tp->irq_max; i++) {
9627                         tp->napi[i].tx_prod = 0;
9628                         tp->napi[i].tx_cons = 0;
9629                         if (tg3_flag(tp, ENABLE_TSS))
9630                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9631                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9632                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9633                         tp->napi[i].chk_msi_cnt = 0;
9634                         tp->napi[i].last_rx_cons = 0;
9635                         tp->napi[i].last_tx_cons = 0;
9636                 }
9637                 if (!tg3_flag(tp, ENABLE_TSS))
9638                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9639         } else {
9640                 tp->napi[0].tx_prod = 0;
9641                 tp->napi[0].tx_cons = 0;
9642                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9643                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9644         }
9645
9646         /* Make sure the NIC-based send BD rings are disabled. */
9647         if (!tg3_flag(tp, 5705_PLUS)) {
9648                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9649                 for (i = 0; i < 16; i++)
9650                         tw32_tx_mbox(mbox + i * 8, 0);
9651         }
9652
9653         /* Clear status block in ram. */
9654         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9655
9656         /* Set status block DMA address */
9657         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9658              ((u64) tnapi->status_mapping >> 32));
9659         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9660              ((u64) tnapi->status_mapping & 0xffffffff));
9661
9662         stblk = HOSTCC_STATBLCK_RING1;
9663
9664         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9665                 u64 mapping = (u64)tnapi->status_mapping;
9666                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9667                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9668                 stblk += 8;
9669
9670                 /* Clear status block in ram. */
9671                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9672         }
9673
9674         tg3_tx_rcbs_init(tp);
9675         tg3_rx_ret_rcbs_init(tp);
9676 }
9677
9678 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9679 {
9680         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9681
9682         if (!tg3_flag(tp, 5750_PLUS) ||
9683             tg3_flag(tp, 5780_CLASS) ||
9684             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9685             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9686             tg3_flag(tp, 57765_PLUS))
9687                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9688         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9689                  tg3_asic_rev(tp) == ASIC_REV_5787)
9690                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9691         else
9692                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9693
9694         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9695         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9696
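        /* E.g. with rx_pending == 200 the host threshold is 25 BDs; the
         * final value is also clamped by the NIC-side BD cache depth.
         */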
9697         val = min(nic_rep_thresh, host_rep_thresh);
9698         tw32(RCVBDI_STD_THRESH, val);
9699
9700         if (tg3_flag(tp, 57765_PLUS))
9701                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9702
9703         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9704                 return;
9705
9706         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9707
9708         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9709
9710         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9711         tw32(RCVBDI_JUMBO_THRESH, val);
9712
9713         if (tg3_flag(tp, 57765_PLUS))
9714                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9715 }
9716
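/* Bitwise little-endian CRC-32 of buf using the Ethernet polynomial,
 * returned inverted; __tg3_set_rx_mode() below uses it to hash multicast
 * addresses.  Assuming the usual crc32_le() semantics, this is equivalent
 * to ~crc32_le(~0, buf, len).
 */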
9717 static inline u32 calc_crc(unsigned char *buf, int len)
9718 {
9719         u32 reg;
9720         u32 tmp;
9721         int j, k;
9722
9723         reg = 0xffffffff;
9724
9725         for (j = 0; j < len; j++) {
9726                 reg ^= buf[j];
9727
9728                 for (k = 0; k < 8; k++) {
9729                         tmp = reg & 0x01;
9730
9731                         reg >>= 1;
9732
9733                         if (tmp)
9734                                 reg ^= CRC32_POLY_LE;
9735                 }
9736         }
9737
9738         return ~reg;
9739 }
9740
9741 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9742 {
9743         /* accept or reject all multicast frames */
9744         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9745         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9746         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9747         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9748 }
9749
9750 static void __tg3_set_rx_mode(struct net_device *dev)
9751 {
9752         struct tg3 *tp = netdev_priv(dev);
9753         u32 rx_mode;
9754
9755         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9756                                   RX_MODE_KEEP_VLAN_TAG);
9757
9758 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9759         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9760          * flag clear.
9761          */
9762         if (!tg3_flag(tp, ENABLE_ASF))
9763                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9764 #endif
9765
9766         if (dev->flags & IFF_PROMISC) {
9767                 /* Promiscuous mode. */
9768                 rx_mode |= RX_MODE_PROMISC;
9769         } else if (dev->flags & IFF_ALLMULTI) {
9770                 /* Accept all multicast. */
9771                 tg3_set_multi(tp, 1);
9772         } else if (netdev_mc_empty(dev)) {
9773                 /* Reject all multicast. */
9774                 tg3_set_multi(tp, 0);
9775         } else {
9776                 /* Accept one or more multicast addresses. */
9777                 struct netdev_hw_addr *ha;
9778                 u32 mc_filter[4] = { 0, };
9779                 u32 regidx;
9780                 u32 bit;
9781                 u32 crc;
9782
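                /* The inverted low 7 bits of each CRC select one of 128
                 * hash bits: bits 6:5 pick one of the four 32-bit hash
                 * registers, bits 4:0 the bit within it.
                 */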
9783                 netdev_for_each_mc_addr(ha, dev) {
9784                         crc = calc_crc(ha->addr, ETH_ALEN);
9785                         bit = ~crc & 0x7f;
9786                         regidx = (bit & 0x60) >> 5;
9787                         bit &= 0x1f;
9788                         mc_filter[regidx] |= (1 << bit);
9789                 }
9790
9791                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9792                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9793                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9794                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9795         }
9796
9797         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9798                 rx_mode |= RX_MODE_PROMISC;
9799         } else if (!(dev->flags & IFF_PROMISC)) {
9800                 /* Add all entries to the MAC address filter list */
9801                 int i = 0;
9802                 struct netdev_hw_addr *ha;
9803
9804                 netdev_for_each_uc_addr(ha, dev) {
9805                         __tg3_set_one_mac_addr(tp, ha->addr,
9806                                                i + TG3_UCAST_ADDR_IDX(tp));
9807                         i++;
9808                 }
9809         }
9810
9811         if (rx_mode != tp->rx_mode) {
9812                 tp->rx_mode = rx_mode;
9813                 tw32_f(MAC_RX_MODE, rx_mode);
9814                 udelay(10);
9815         }
9816 }
9817
9818 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9819 {
9820         int i;
9821
9822         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9823                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9824 }
9825
9826 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9827 {
9828         int i;
9829
9830         if (!tg3_flag(tp, SUPPORT_MSIX))
9831                 return;
9832
9833         if (tp->rxq_cnt == 1) {
9834                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9835                 return;
9836         }
9837
9838         /* Validate table against current IRQ count */
9839         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9840                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9841                         break;
9842         }
9843
9844         if (i != TG3_RSS_INDIR_TBL_SIZE)
9845                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9846 }
9847
9848 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9849 {
9850         int i = 0;
9851         u32 reg = MAC_RSS_INDIR_TBL_0;
9852
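        /* Pack eight 4-bit indirection entries into each 32-bit register,
         * first entry in the most significant nibble.
         */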
9853         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9854                 u32 val = tp->rss_ind_tbl[i];
9855                 i++;
9856                 for (; i % 8; i++) {
9857                         val <<= 4;
9858                         val |= tp->rss_ind_tbl[i];
9859                 }
9860                 tw32(reg, val);
9861                 reg += 4;
9862         }
9863 }
9864
9865 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9866 {
9867         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9868                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9869         else
9870                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9871 }
9872
9873 /* tp->lock is held. */
9874 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9875 {
9876         u32 val, rdmac_mode;
9877         int i, err, limit;
9878         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9879
9880         tg3_disable_ints(tp);
9881
9882         tg3_stop_fw(tp);
9883
9884         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9885
9886         if (tg3_flag(tp, INIT_COMPLETE))
9887                 tg3_abort_hw(tp, 1);
9888                 tg3_abort_hw(tp, true);
9889         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9890             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9891                 tg3_phy_pull_config(tp);
9892                 tg3_eee_pull_config(tp, NULL);
9893                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9894         }
9895
9896         /* Enable MAC control of LPI */
9897         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9898                 tg3_setup_eee(tp);
9899
9900         if (reset_phy)
9901                 tg3_phy_reset(tp);
9902
9903         err = tg3_chip_reset(tp);
9904         if (err)
9905                 return err;
9906
9907         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9908
9909         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9910                 val = tr32(TG3_CPMU_CTRL);
9911                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9912                 tw32(TG3_CPMU_CTRL, val);
9913
9914                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9915                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9916                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9917                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9918
9919                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9920                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9921                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9922                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9923
9924                 val = tr32(TG3_CPMU_HST_ACC);
9925                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9926                 val |= CPMU_HST_ACC_MACCLK_6_25;
9927                 tw32(TG3_CPMU_HST_ACC, val);
9928         }
9929
9930         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9931                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9932                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9933                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9934                 tw32(PCIE_PWR_MGMT_THRESH, val);
9935
9936                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9937                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9938
9939                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9940
9941                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9942                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9943         }
9944
9945         if (tg3_flag(tp, L1PLLPD_EN)) {
9946                 u32 grc_mode = tr32(GRC_MODE);
9947
9948                 /* Access the lower 1K of PL PCIE block registers. */
9949                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9950                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9951
9952                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9953                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9954                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9955
9956                 tw32(GRC_MODE, grc_mode);
9957         }
9958
9959         if (tg3_flag(tp, 57765_CLASS)) {
9960                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9961                         u32 grc_mode = tr32(GRC_MODE);
9962
9963                         /* Access the lower 1K of PL PCIE block registers. */
9964                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9965                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9966
9967                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9968                                    TG3_PCIE_PL_LO_PHYCTL5);
9969                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9970                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9971
9972                         tw32(GRC_MODE, grc_mode);
9973                 }
9974
9975                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9976                         u32 grc_mode;
9977
9978                         /* Fix transmit hangs */
9979                         val = tr32(TG3_CPMU_PADRNG_CTL);
9980                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9981                         tw32(TG3_CPMU_PADRNG_CTL, val);
9982
9983                         grc_mode = tr32(GRC_MODE);
9984
9985                         /* Access the lower 1K of DL PCIE block registers. */
9986                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9987                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9988
9989                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9990                                    TG3_PCIE_DL_LO_FTSMAX);
9991                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9992                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9993                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9994
9995                         tw32(GRC_MODE, grc_mode);
9996                 }
9997
9998                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9999                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10000                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10001                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10002         }
10003
10004         /* This works around an issue with Athlon chipsets on
10005          * B3 tigon3 silicon.  This bit has no effect on any
10006          * other revision.  But do not set this on PCI Express
10007          * chips and don't even touch the clocks if the CPMU is present.
10008          */
10009         if (!tg3_flag(tp, CPMU_PRESENT)) {
10010                 if (!tg3_flag(tp, PCI_EXPRESS))
10011                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10012                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10013         }
10014
10015         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10016             tg3_flag(tp, PCIX_MODE)) {
10017                 val = tr32(TG3PCI_PCISTATE);
10018                 val |= PCISTATE_RETRY_SAME_DMA;
10019                 tw32(TG3PCI_PCISTATE, val);
10020         }
10021
10022         if (tg3_flag(tp, ENABLE_APE)) {
10023                 /* Allow reads and writes to the
10024                  * APE register and memory space.
10025                  */
10026                 val = tr32(TG3PCI_PCISTATE);
10027                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10028                        PCISTATE_ALLOW_APE_SHMEM_WR |
10029                        PCISTATE_ALLOW_APE_PSPACE_WR;
10030                 tw32(TG3PCI_PCISTATE, val);
10031         }
10032
10033         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10034                 /* Enable some hw fixes.  */
10035                 val = tr32(TG3PCI_MSI_DATA);
10036                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10037                 tw32(TG3PCI_MSI_DATA, val);
10038         }
10039
10040         /* Descriptor ring init may make accesses to the
10041          * NIC SRAM area to set up the TX descriptors, so we
10042          * can only do this after the hardware has been
10043          * successfully reset.
10044          */
10045         err = tg3_init_rings(tp);
10046         if (err)
10047                 return err;
10048
10049         if (tg3_flag(tp, 57765_PLUS)) {
10050                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10051                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10052                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10053                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10054                 if (!tg3_flag(tp, 57765_CLASS) &&
10055                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10056                     tg3_asic_rev(tp) != ASIC_REV_5762)
10057                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10058                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10059         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10060                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10061                 /* This value is determined during the probe-time DMA
10062                  * engine test, tg3_test_dma().
10063                  */
10064                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10065         }
10066
10067         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10068                           GRC_MODE_4X_NIC_SEND_RINGS |
10069                           GRC_MODE_NO_TX_PHDR_CSUM |
10070                           GRC_MODE_NO_RX_PHDR_CSUM);
10071         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10072
10073         /* Pseudo-header checksum is done by hardware logic and not
10074          * the offload processors, so make the chip do the pseudo-
10075          * header checksums on receive.  For transmit it is more
10076          * convenient to do the pseudo-header checksum in software
10077          * as Linux does that on transmit for us in all cases.
10078          */
10079         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10080
10081         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10082         if (tp->rxptpctl)
10083                 tw32(TG3_RX_PTP_CTL,
10084                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10085
10086         if (tg3_flag(tp, PTP_CAPABLE))
10087                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10088
10089         tw32(GRC_MODE, tp->grc_mode | val);
10090
10091         /* On one of the AMD platforms, MRRS is restricted to 4000 because of
10092          * a south bridge limitation. As a workaround, the driver sets MRRS
10093          * to 2048 instead of the default 4096.
10094          */
10095         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10096             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10097                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10098                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10099         }
10100
10101         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
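              /* A prescaler value of 65 divides the 66 MHz core clock down
               * to a 1 MHz (1 us) timer tick.
               */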
10102         val = tr32(GRC_MISC_CFG);
10103         val &= ~0xff;
10104         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10105         tw32(GRC_MISC_CFG, val);
10106
10107         /* Initialize MBUF/DESC pool. */
10108         if (tg3_flag(tp, 5750_PLUS)) {
10109                 /* Do nothing.  */
10110         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10111                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10112                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10113                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10114                 else
10115                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10116                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10117                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10118         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10119                 int fw_len;
10120
10121                 fw_len = tp->fw_len;
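                      /* Round fw_len up to a 128-byte boundary; the mbuf
                       * pool starts immediately after the TSO firmware
                       * image in NIC SRAM.
                       */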
10122                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10123                 tw32(BUFMGR_MB_POOL_ADDR,
10124                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10125                 tw32(BUFMGR_MB_POOL_SIZE,
10126                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10127         }
10128
10129         if (tp->dev->mtu <= ETH_DATA_LEN) {
10130                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10131                      tp->bufmgr_config.mbuf_read_dma_low_water);
10132                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10133                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10134                 tw32(BUFMGR_MB_HIGH_WATER,
10135                      tp->bufmgr_config.mbuf_high_water);
10136         } else {
10137                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10138                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10139                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10140                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10141                 tw32(BUFMGR_MB_HIGH_WATER,
10142                      tp->bufmgr_config.mbuf_high_water_jumbo);
10143         }
10144         tw32(BUFMGR_DMA_LOW_WATER,
10145              tp->bufmgr_config.dma_low_water);
10146         tw32(BUFMGR_DMA_HIGH_WATER,
10147              tp->bufmgr_config.dma_high_water);
10148
10149         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10150         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10151                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10152         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10153             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10154             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10155             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10156                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10157         tw32(BUFMGR_MODE, val);
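              /* Poll up to 20 ms (2000 * 10 us) for the buffer manager
               * to report itself enabled.
               */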
10158         for (i = 0; i < 2000; i++) {
10159                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10160                         break;
10161                 udelay(10);
10162         }
10163         if (i >= 2000) {
10164                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10165                 return -ENODEV;
10166         }
10167
10168         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10169                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10170
10171         tg3_setup_rxbd_thresholds(tp);
10172
10173         /* Initialize TG3_BDINFO's at:
10174          *  RCVDBDI_STD_BD:     standard eth size rx ring
10175          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10176          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10177          *
10178          * like so:
10179          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10180          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10181          *                              ring attribute flags
10182          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10183          *
10184          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10185          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10186          *
10187          * The size of each ring is fixed in the firmware, but the location is
10188          * configurable.
10189          */
10190         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10191              ((u64) tpr->rx_std_mapping >> 32));
10192         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10193              ((u64) tpr->rx_std_mapping & 0xffffffff));
10194         if (!tg3_flag(tp, 5717_PLUS))
10195                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10196                      NIC_SRAM_RX_BUFFER_DESC);
10197
10198         /* Disable the mini ring */
10199         if (!tg3_flag(tp, 5705_PLUS))
10200                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10201                      BDINFO_FLAGS_DISABLED);
10202
10203         /* Program the jumbo buffer descriptor ring control
10204          * blocks on those devices that have them.
10205          */
10206         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10207             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10208
10209                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10210                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10211                              ((u64) tpr->rx_jmb_mapping >> 32));
10212                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10213                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10214                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10215                               BDINFO_FLAGS_MAXLEN_SHIFT;
10216                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10217                              val | BDINFO_FLAGS_USE_EXT_RECV);
10218                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10219                             tg3_flag(tp, 57765_CLASS) ||
10220                             tg3_asic_rev(tp) == ASIC_REV_5762)
10221                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10222                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10223                 } else {
10224                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10225                              BDINFO_FLAGS_DISABLED);
10226                 }
10227
10228                 if (tg3_flag(tp, 57765_PLUS)) {
10229                         val = TG3_RX_STD_RING_SIZE(tp);
10230                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10231                         val |= (TG3_RX_STD_DMA_SZ << 2);
10232                 } else
10233                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10234         } else
10235                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10236
10237         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10238
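              /* Publish the initial rx producer indices through the
               * mailboxes so the chip sees the buffers posted by
               * tg3_init_rings().
               */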
10239         tpr->rx_std_prod_idx = tp->rx_pending;
10240         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10241
10242         tpr->rx_jmb_prod_idx =
10243                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10244         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10245
10246         tg3_rings_reset(tp);
10247
10248         /* Initialize MAC address and backoff seed. */
10249         __tg3_set_mac_addr(tp, false);
10250
10251         /* MTU + ethernet header + FCS + optional VLAN tag */
10252         tw32(MAC_RX_MTU_SIZE,
10253              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10254
10255         /* The slot time is changed by tg3_setup_phy if we
10256          * run at gigabit with half duplex.
10257          */
10258         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10259               (6 << TX_LENGTHS_IPG_SHIFT) |
10260               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10261
10262         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10263             tg3_asic_rev(tp) == ASIC_REV_5762)
10264                 val |= tr32(MAC_TX_LENGTHS) &
10265                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10266                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10267
10268         tw32(MAC_TX_LENGTHS, val);
10269
10270         /* Receive rules. */
10271         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10272         tw32(RCVLPC_CONFIG, 0x0181);
10273
10274         /* Calculate the RDMAC_MODE setting early; we need it to determine
10275          * the RCVLPC_STATE_ENABLE mask.
10276          */
10277         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10278                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10279                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10280                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10281                       RDMAC_MODE_LNGREAD_ENAB);
10282
10283         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10284                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10285
10286         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10287             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10288             tg3_asic_rev(tp) == ASIC_REV_57780)
10289                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10290                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10291                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10292
10293         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10294             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10295                 if (tg3_flag(tp, TSO_CAPABLE) &&
10296                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10297                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10298                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10299                            !tg3_flag(tp, IS_5788)) {
10300                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10301                 }
10302         }
10303
10304         if (tg3_flag(tp, PCI_EXPRESS))
10305                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10306
10307         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10308                 tp->dma_limit = 0;
10309                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10310                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10311                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10312                 }
10313         }
10314
10315         if (tg3_flag(tp, HW_TSO_1) ||
10316             tg3_flag(tp, HW_TSO_2) ||
10317             tg3_flag(tp, HW_TSO_3))
10318                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10319
10320         if (tg3_flag(tp, 57765_PLUS) ||
10321             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10322             tg3_asic_rev(tp) == ASIC_REV_57780)
10323                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10324
10325         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10326             tg3_asic_rev(tp) == ASIC_REV_5762)
10327                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10328
10329         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10330             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10331             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10332             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10333             tg3_flag(tp, 57765_PLUS)) {
10334                 u32 tgtreg;
10335
10336                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10337                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10338                 else
10339                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10340
10341                 val = tr32(tgtreg);
10342                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10343                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10344                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10345                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10346                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10347                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10348                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10349                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10350                 }
10351                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10352         }
10353
10354         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10355             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10356             tg3_asic_rev(tp) == ASIC_REV_5762) {
10357                 u32 tgtreg;
10358
10359                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10360                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10361                 else
10362                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10363
10364                 val = tr32(tgtreg);
10365                 tw32(tgtreg, val |
10366                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10367                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10368         }
10369
10370         /* Receive/send statistics. */
10371         if (tg3_flag(tp, 5750_PLUS)) {
10372                 val = tr32(RCVLPC_STATS_ENABLE);
10373                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10374                 tw32(RCVLPC_STATS_ENABLE, val);
10375         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10376                    tg3_flag(tp, TSO_CAPABLE)) {
10377                 val = tr32(RCVLPC_STATS_ENABLE);
10378                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10379                 tw32(RCVLPC_STATS_ENABLE, val);
10380         } else {
10381                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10382         }
10383         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10384         tw32(SNDDATAI_STATSENAB, 0xffffff);
10385         tw32(SNDDATAI_STATSCTRL,
10386              (SNDDATAI_SCTRL_ENABLE |
10387               SNDDATAI_SCTRL_FASTUPD));
10388
10389         /* Set up the host coalescing engine. */
10390         tw32(HOSTCC_MODE, 0);
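              /* Wait up to 20 ms (2000 * 10 us) for the engine to report
               * itself disabled before reprogramming it.
               */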
10391         for (i = 0; i < 2000; i++) {
10392                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10393                         break;
10394                 udelay(10);
10395         }
10396
10397         __tg3_set_coalesce(tp, &tp->coal);
10398
10399         if (!tg3_flag(tp, 5705_PLUS)) {
10400                 /* Status/statistics block address.  See tg3_timer,
10401                  * the tg3_periodic_fetch_stats call there, and
10402                  * tg3_get_stats to see how this works for 5705/5750 chips.
10403                  */
10404                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10405                      ((u64) tp->stats_mapping >> 32));
10406                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10407                      ((u64) tp->stats_mapping & 0xffffffff));
10408                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10409
10410                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10411
10412                 /* Clear statistics and status block memory areas */
10413                 for (i = NIC_SRAM_STATS_BLK;
10414                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10415                      i += sizeof(u32)) {
10416                         tg3_write_mem(tp, i, 0);
10417                         udelay(40);
10418                 }
10419         }
10420
10421         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10422
10423         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10424         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10425         if (!tg3_flag(tp, 5705_PLUS))
10426                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10427
10428         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10429                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10430                 /* Reset to prevent intermittently losing the first rx packet. */
10431                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10432                 udelay(10);
10433         }
10434
10435         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10436                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10437                         MAC_MODE_FHDE_ENABLE;
10438         if (tg3_flag(tp, ENABLE_APE))
10439                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10440         if (!tg3_flag(tp, 5705_PLUS) &&
10441             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10442             tg3_asic_rev(tp) != ASIC_REV_5700)
10443                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10444         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10445         udelay(40);
10446
10447         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10448          * If TG3_FLAG_IS_NIC is zero, we should read the
10449          * register to preserve the GPIO settings for LOMs. The GPIOs,
10450          * whether used as inputs or outputs, are set by boot code after
10451          * reset.
10452          */
10453         if (!tg3_flag(tp, IS_NIC)) {
10454                 u32 gpio_mask;
10455
10456                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10457                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10458                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10459
10460                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10461                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10462                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10463
10464                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10465                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10466
10467                 tp->grc_local_ctrl &= ~gpio_mask;
10468                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10469
10470                 /* GPIO1 must be driven high for eeprom write protect */
10471                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10472                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10473                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10474         }
10475         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10476         udelay(100);
10477
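              /* Enable message-signalled interrupts.  Multivector mode is
               * needed when more than one vector is in use, and one-shot
               * mode is disabled unless the driver is actually using it.
               */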
10478         if (tg3_flag(tp, USING_MSIX)) {
10479                 val = tr32(MSGINT_MODE);
10480                 val |= MSGINT_MODE_ENABLE;
10481                 if (tp->irq_cnt > 1)
10482                         val |= MSGINT_MODE_MULTIVEC_EN;
10483                 if (!tg3_flag(tp, 1SHOT_MSI))
10484                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10485                 tw32(MSGINT_MODE, val);
10486         }
10487
10488         if (!tg3_flag(tp, 5705_PLUS)) {
10489                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10490                 udelay(40);
10491         }
10492
10493         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10494                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10495                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10496                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10497                WDMAC_MODE_LNGREAD_ENAB);
10498
10499         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10500             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10501                 if (tg3_flag(tp, TSO_CAPABLE) &&
10502                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10503                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10504                         /* nothing */
10505                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10506                            !tg3_flag(tp, IS_5788)) {
10507                         val |= WDMAC_MODE_RX_ACCEL;
10508                 }
10509         }
10510
10511         /* Enable host coalescing bug fix */
10512         if (tg3_flag(tp, 5755_PLUS))
10513                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10514
10515         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10516                 val |= WDMAC_MODE_BURST_ALL_DATA;
10517
10518         tw32_f(WDMAC_MODE, val);
10519         udelay(40);
10520
10521         if (tg3_flag(tp, PCIX_MODE)) {
10522                 u16 pcix_cmd;
10523
10524                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10525                                      &pcix_cmd);
10526                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10527                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10528                         pcix_cmd |= PCI_X_CMD_READ_2K;
10529                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10530                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10531                         pcix_cmd |= PCI_X_CMD_READ_2K;
10532                 }
10533                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10534                                       pcix_cmd);
10535         }
10536
10537         tw32_f(RDMAC_MODE, rdmac_mode);
10538         udelay(40);
10539
10540         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10541             tg3_asic_rev(tp) == ASIC_REV_5720) {
10542                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10543                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10544                                 break;
10545                 }
10546                 if (i < TG3_NUM_RDMA_CHANNELS) {
10547                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10548                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10549                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10550                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10551                 }
10552         }
10553
10554         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10555         if (!tg3_flag(tp, 5705_PLUS))
10556                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10557
10558         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10559                 tw32(SNDDATAC_MODE,
10560                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10561         else
10562                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10563
10564         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10565         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10566         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10567         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10568                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10569         tw32(RCVDBDI_MODE, val);
10570         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10571         if (tg3_flag(tp, HW_TSO_1) ||
10572             tg3_flag(tp, HW_TSO_2) ||
10573             tg3_flag(tp, HW_TSO_3))
10574                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10575         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10576         if (tg3_flag(tp, ENABLE_TSS))
10577                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10578         tw32(SNDBDI_MODE, val);
10579         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10580
10581         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10582                 err = tg3_load_5701_a0_firmware_fix(tp);
10583                 if (err)
10584                         return err;
10585         }
10586
10587         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10588                 /* Ignore any errors for the firmware download. If download
10589                  * fails, the device will operate with EEE disabled.
10590                  */
10591                 tg3_load_57766_firmware(tp);
10592         }
10593
10594         if (tg3_flag(tp, TSO_CAPABLE)) {
10595                 err = tg3_load_tso_firmware(tp);
10596                 if (err)
10597                         return err;
10598         }
10599
10600         tp->tx_mode = TX_MODE_ENABLE;
10601
10602         if (tg3_flag(tp, 5755_PLUS) ||
10603             tg3_asic_rev(tp) == ASIC_REV_5906)
10604                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10605
10606         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10607             tg3_asic_rev(tp) == ASIC_REV_5762) {
10608                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10609                 tp->tx_mode &= ~val;
10610                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10611         }
10612
10613         tw32_f(MAC_TX_MODE, tp->tx_mode);
10614         udelay(100);
10615
10616         if (tg3_flag(tp, ENABLE_RSS)) {
10617                 u32 rss_key[10];
10618
10619                 tg3_rss_write_indir_tbl(tp);
10620
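                      /* Fill a 40-byte (ten u32) hash key from the
                       * kernel's global RSS key and program it into the
                       * MAC hash-key registers below.
                       */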
10621                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10622
10623                 for (i = 0; i < 10; i++)
10624                         tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10625         }
10626
10627         tp->rx_mode = RX_MODE_ENABLE;
10628         if (tg3_flag(tp, 5755_PLUS))
10629                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10630
10631         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10632                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10633
10634         if (tg3_flag(tp, ENABLE_RSS))
10635                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10636                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10637                                RX_MODE_RSS_IPV6_HASH_EN |
10638                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10639                                RX_MODE_RSS_IPV4_HASH_EN |
10640                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10641
10642         tw32_f(MAC_RX_MODE, tp->rx_mode);
10643         udelay(10);
10644
10645         tw32(MAC_LED_CTRL, tp->led_ctrl);
10646
10647         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10648         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10649                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10650                 udelay(10);
10651         }
10652         tw32_f(MAC_RX_MODE, tp->rx_mode);
10653         udelay(10);
10654
10655         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10656                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10657                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10658                         /* Set the drive transmission level to 1.2V, but only
10659                          * if the signal pre-emphasis bit is not set. */
10660                         val = tr32(MAC_SERDES_CFG);
10661                         val &= 0xfffff000;
10662                         val |= 0x880;
10663                         tw32(MAC_SERDES_CFG, val);
10664                 }
10665                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10666                         tw32(MAC_SERDES_CFG, 0x616000);
10667         }
10668
10669         /* Prevent chip from dropping frames when flow control
10670          * is enabled.
10671          */
10672         if (tg3_flag(tp, 57765_CLASS))
10673                 val = 1;
10674         else
10675                 val = 2;
10676         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10677
10678         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10679             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10680                 /* Use hardware link auto-negotiation */
10681                 tg3_flag_set(tp, HW_AUTONEG);
10682         }
10683
10684         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10685             tg3_asic_rev(tp) == ASIC_REV_5714) {
10686                 u32 tmp;
10687
10688                 tmp = tr32(SERDES_RX_CTRL);
10689                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10690                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10691                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10692                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10693         }
10694
10695         if (!tg3_flag(tp, USE_PHYLIB)) {
10696                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10697                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10698
10699                 err = tg3_setup_phy(tp, false);
10700                 if (err)
10701                         return err;
10702
10703                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10704                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10705                         u32 tmp;
10706
10707                         /* Clear CRC stats. */
10708                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10709                                 tg3_writephy(tp, MII_TG3_TEST1,
10710                                              tmp | MII_TG3_TEST1_CRC_EN);
10711                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10712                         }
10713                 }
10714         }
10715
10716         __tg3_set_rx_mode(tp->dev);
10717
10718         /* Initialize receive rules. */
10719         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10720         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10721         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10722         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10723
10724         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10725                 limit = 8;
10726         else
10727                 limit = 16;
10728         if (tg3_flag(tp, ENABLE_ASF))
10729                 limit -= 4;
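              /* Clear the unused receive rules from rule (limit - 1) down
               * to rule 4.  Rules 0 and 1 were programmed above; with ASF
               * enabled, the last four rules are left untouched for the
               * firmware.
               */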
10730         switch (limit) {
10731         case 16:
10732                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10733                 /* fall through */
10734         case 15:
10735                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10736                 /* fall through */
10737         case 14:
10738                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10739                 /* fall through */
10740         case 13:
10741                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10742                 /* fall through */
10743         case 12:
10744                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10745                 /* fall through */
10746         case 11:
10747                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10748                 /* fall through */
10749         case 10:
10750                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10751                 /* fall through */
10752         case 9:
10753                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10754                 /* fall through */
10755         case 8:
10756                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10757                 /* fall through */
10758         case 7:
10759                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10760                 /* fall through */
10761         case 6:
10762                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10763                 /* fall through */
10764         case 5:
10765                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10766                 /* fall through */
10767         case 4:
10768                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10769         case 3:
10770                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10771         case 2:
10772         case 1:
10773
10774         default:
10775                 break;
10776         }
10777
10778         if (tg3_flag(tp, ENABLE_APE))
10779                 /* Write our heartbeat update interval to APE. */
10780                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10781                                 APE_HOST_HEARTBEAT_INT_5SEC);
10782
10783         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10784
10785         return 0;
10786 }
10787
10788 /* Called at device open time to get the chip ready for
10789  * packet processing.  Invoked with tp->lock held.
10790  */
10791 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10792 {
10793         /* Chip may have been just powered on. If so, the boot code may still
10794          * be running initialization. Wait for it to finish to avoid races in
10795          * accessing the hardware.
10796          */
10797         tg3_enable_register_access(tp);
10798         tg3_poll_fw(tp);
10799
10800         tg3_switch_clocks(tp);
10801
10802         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10803
10804         return tg3_reset_hw(tp, reset_phy);
10805 }
10806
10807 #ifdef CONFIG_TIGON3_HWMON
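      /* Scan the APE scratchpad for OCIR records; entries with a bad
       * signature or without the ACTIVE flag are zeroed out.
       */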
10808 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10809 {
10810         int i;
10811
10812         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10813                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10814
10815                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10816                 off += len;
10817
10818                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10819                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10820                         memset(ocir, 0, TG3_OCIR_LEN);
10821         }
10822 }
10823
10824 /* sysfs attributes for hwmon */
10825 static ssize_t tg3_show_temp(struct device *dev,
10826                              struct device_attribute *devattr, char *buf)
10827 {
10828         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10829         struct tg3 *tp = dev_get_drvdata(dev);
10830         u32 temperature;
10831
10832         spin_lock_bh(&tp->lock);
10833         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10834                                 sizeof(temperature));
10835         spin_unlock_bh(&tp->lock);
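              /* hwmon reports temperatures in millidegrees Celsius, hence
               * the multiply by 1000.
               */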
10836         return sprintf(buf, "%u\n", temperature * 1000);
10837 }
10838
10839
10840 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10841                           TG3_TEMP_SENSOR_OFFSET);
10842 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10843                           TG3_TEMP_CAUTION_OFFSET);
10844 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10845                           TG3_TEMP_MAX_OFFSET);
10846
10847 static struct attribute *tg3_attrs[] = {
10848         &sensor_dev_attr_temp1_input.dev_attr.attr,
10849         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10850         &sensor_dev_attr_temp1_max.dev_attr.attr,
10851         NULL
10852 };
10853 ATTRIBUTE_GROUPS(tg3);
10854
10855 static void tg3_hwmon_close(struct tg3 *tp)
10856 {
10857         if (tp->hwmon_dev) {
10858                 hwmon_device_unregister(tp->hwmon_dev);
10859                 tp->hwmon_dev = NULL;
10860         }
10861 }
10862
10863 static void tg3_hwmon_open(struct tg3 *tp)
10864 {
10865         int i;
10866         u32 size = 0;
10867         struct pci_dev *pdev = tp->pdev;
10868         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10869
10870         tg3_sd_scan_scratchpad(tp, ocirs);
10871
10872         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10873                 if (!ocirs[i].src_data_length)
10874                         continue;
10875
10876                 size += ocirs[i].src_hdr_length;
10877                 size += ocirs[i].src_data_length;
10878         }
10879
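              /* Register with hwmon only if at least one active record
               * actually reported sensor data.
               */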
10880         if (!size)
10881                 return;
10882
10883         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10884                                                           tp, tg3_groups);
10885         if (IS_ERR(tp->hwmon_dev)) {
10886                 tp->hwmon_dev = NULL;
10887                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10888         }
10889 }
10890 #else
10891 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10892 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10893 #endif /* CONFIG_TIGON3_HWMON */
10894
10895
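      /* Fold a 32-bit hardware statistics register into a 64-bit software
       * counter, carrying into the high word when the low word wraps.
       */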
10896 #define TG3_STAT_ADD32(PSTAT, REG) \
10897 do {    u32 __val = tr32(REG); \
10898         (PSTAT)->low += __val; \
10899         if ((PSTAT)->low < __val) \
10900                 (PSTAT)->high += 1; \
10901 } while (0)
10902
10903 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10904 {
10905         struct tg3_hw_stats *sp = tp->hw_stats;
10906
10907         if (!tp->link_up)
10908                 return;
10909
10910         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10911         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10912         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10913         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10914         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10915         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10916         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10917         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10918         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10919         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10920         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10921         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10922         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10923         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10924                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10925                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10926                 u32 val;
10927
10928                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10929                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10930                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10931                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10932         }
10933
10934         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10935         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10936         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10937         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10938         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10939         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10940         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10941         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10942         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10943         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10944         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10945         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10946         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10947         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10948
10949         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10950         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10951             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10952             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10953             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10954                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10955         } else {
10956                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10957                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10958                 if (val) {
10959                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10960                         sp->rx_discards.low += val;
10961                         if (sp->rx_discards.low < val)
10962                                 sp->rx_discards.high += 1;
10963                 }
10964                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10965         }
10966         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10967 }
10968
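      /* Work around occasionally missed MSIs: if a vector has work pending
       * but its consumer indices have not moved since the last check, give
       * it one more timer tick and then invoke the MSI handler by hand.
       */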
10969 static void tg3_chk_missed_msi(struct tg3 *tp)
10970 {
10971         u32 i;
10972
10973         for (i = 0; i < tp->irq_cnt; i++) {
10974                 struct tg3_napi *tnapi = &tp->napi[i];
10975
10976                 if (tg3_has_work(tnapi)) {
10977                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10978                             tnapi->last_tx_cons == tnapi->tx_cons) {
10979                                 if (tnapi->chk_msi_cnt < 1) {
10980                                         tnapi->chk_msi_cnt++;
10981                                         return;
10982                                 }
10983                                 tg3_msi(0, tnapi);
10984                         }
10985                 }
10986                 tnapi->chk_msi_cnt = 0;
10987                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10988                 tnapi->last_tx_cons = tnapi->tx_cons;
10989         }
10990 }
10991
10992 static void tg3_timer(struct timer_list *t)
10993 {
10994         struct tg3 *tp = from_timer(tp, t, timer);
10995
10996         spin_lock(&tp->lock);
10997
10998         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10999                 spin_unlock(&tp->lock);
11000                 goto restart_timer;
11001         }
11002
11003         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11004             tg3_flag(tp, 57765_CLASS))
11005                 tg3_chk_missed_msi(tp);
11006
11007         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11008                 /* BCM4785: Flush posted writes from GbE to host memory. */
11009                 tr32(HOSTCC_MODE);
11010         }
11011
11012         if (!tg3_flag(tp, TAGGED_STATUS)) {
11013                 /* All of this is needed because, when using non-tagged
11014                  * IRQ status, the mailbox/status_block protocol the chip
11015                  * uses with the CPU is race prone.
11016                  */
11017                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11018                         tw32(GRC_LOCAL_CTRL,
11019                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11020                 } else {
11021                         tw32(HOSTCC_MODE, tp->coalesce_mode |
11022                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11023                 }
11024
11025                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11026                         spin_unlock(&tp->lock);
11027                         tg3_reset_task_schedule(tp);
11028                         goto restart_timer;
11029                 }
11030         }
11031
11032         /* This part only runs once per second. */
11033         if (!--tp->timer_counter) {
11034                 if (tg3_flag(tp, 5705_PLUS))
11035                         tg3_periodic_fetch_stats(tp);
11036
11037                 if (tp->setlpicnt && !--tp->setlpicnt)
11038                         tg3_phy_eee_enable(tp);
11039
11040                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11041                         u32 mac_stat;
11042                         int phy_event;
11043
11044                         mac_stat = tr32(MAC_STATUS);
11045
11046                         phy_event = 0;
11047                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11048                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11049                                         phy_event = 1;
11050                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11051                                 phy_event = 1;
11052
11053                         if (phy_event)
11054                                 tg3_setup_phy(tp, false);
11055                 } else if (tg3_flag(tp, POLL_SERDES)) {
11056                         u32 mac_stat = tr32(MAC_STATUS);
11057                         int need_setup = 0;
11058
11059                         if (tp->link_up &&
11060                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11061                                 need_setup = 1;
11062                         }
11063                         if (!tp->link_up &&
11064                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11065                                          MAC_STATUS_SIGNAL_DET))) {
11066                                 need_setup = 1;
11067                         }
11068                         if (need_setup) {
11069                                 if (!tp->serdes_counter) {
11070                                         tw32_f(MAC_MODE,
11071                                              (tp->mac_mode &
11072                                               ~MAC_MODE_PORT_MODE_MASK));
11073                                         udelay(40);
11074                                         tw32_f(MAC_MODE, tp->mac_mode);
11075                                         udelay(40);
11076                                 }
11077                                 tg3_setup_phy(tp, false);
11078                         }
11079                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11080                            tg3_flag(tp, 5780_CLASS)) {
11081                         tg3_serdes_parallel_detect(tp);
11082                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11083                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11084                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11085                                          TG3_CPMU_STATUS_LINK_MASK);
11086
11087                         if (link_up != tp->link_up)
11088                                 tg3_setup_phy(tp, false);
11089                 }
11090
11091                 tp->timer_counter = tp->timer_multiplier;
11092         }
11093
11094         /* Heartbeat is only sent once every 2 seconds.
11095          *
11096          * The heartbeat is to tell the ASF firmware that the host
11097          * driver is still alive.  In the event that the OS crashes,
11098          * ASF needs to reset the hardware to free up the FIFO space
11099          * that may be filled with rx packets destined for the host.
11100          * If the FIFO is full, ASF will no longer function properly.
11101          *
11102          * Unintended resets have been reported on real-time kernels
11103          * where the timer doesn't run on time.  Netpoll will have the
11104          * same problem.
11105          *
11106          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11107          * to check the ring condition when the heartbeat is expiring
11108          * before doing the reset.  This will prevent most unintended
11109          * resets.
11110          */
11111         if (!--tp->asf_counter) {
11112                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11113                         tg3_wait_for_event_ack(tp);
11114
11115                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11116                                       FWCMD_NICDRV_ALIVE3);
11117                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11118                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11119                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11120
11121                         tg3_generate_fw_event(tp);
11122                 }
11123                 tp->asf_counter = tp->asf_multiplier;
11124         }
11125
11126         /* Update the APE heartbeat every 5 seconds. */
11127         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11128
11129         spin_unlock(&tp->lock);
11130
11131 restart_timer:
11132         tp->timer.expires = jiffies + tp->timer_offset;
11133         add_timer(&tp->timer);
11134 }
11135
11136 static void tg3_timer_init(struct tg3 *tp)
11137 {
11138         if (tg3_flag(tp, TAGGED_STATUS) &&
11139             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11140             !tg3_flag(tp, 57765_CLASS))
11141                 tp->timer_offset = HZ;
11142         else
11143                 tp->timer_offset = HZ / 10;
11144
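              /* The timer fires every timer_offset jiffies: ten times per
               * second, or once per second on tagged-status chips.  The
               * multipliers below convert that tick rate into the
               * once-per-second and ASF-heartbeat intervals used by
               * tg3_timer().
               */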
11145         BUG_ON(tp->timer_offset > HZ);
11146
11147         tp->timer_multiplier = (HZ / tp->timer_offset);
11148         tp->asf_multiplier = (HZ / tp->timer_offset) *
11149                              TG3_FW_UPDATE_FREQ_SEC;
11150
11151         timer_setup(&tp->timer, tg3_timer, 0);
11152 }
11153
11154 static void tg3_timer_start(struct tg3 *tp)
11155 {
11156         tp->asf_counter   = tp->asf_multiplier;
11157         tp->timer_counter = tp->timer_multiplier;
11158
11159         tp->timer.expires = jiffies + tp->timer_offset;
11160         add_timer(&tp->timer);
11161 }
11162
11163 static void tg3_timer_stop(struct tg3 *tp)
11164 {
11165         del_timer_sync(&tp->timer);
11166 }
11167
11168 /* Restart hardware after configuration changes, self-test, etc.
11169  * Invoked with tp->lock held.
11170  */
11171 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11172         __releases(tp->lock)
11173         __acquires(tp->lock)
11174 {
11175         int err;
11176
11177         err = tg3_init_hw(tp, reset_phy);
11178         if (err) {
11179                 netdev_err(tp->dev,
11180                            "Failed to re-initialize device, aborting\n");
11181                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11182                 tg3_full_unlock(tp);
11183                 tg3_timer_stop(tp);
11184                 tp->irq_sync = 0;
11185                 tg3_napi_enable(tp);
11186                 dev_close(tp->dev);
11187                 tg3_full_lock(tp, 0);
11188         }
11189         return err;
11190 }
11191
11192 static void tg3_reset_task(struct work_struct *work)
11193 {
11194         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11195         int err;
11196
11197         rtnl_lock();
11198         tg3_full_lock(tp, 0);
11199
11200         if (!netif_running(tp->dev)) {
11201                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11202                 tg3_full_unlock(tp);
11203                 rtnl_unlock();
11204                 return;
11205         }
11206
11207         tg3_full_unlock(tp);
11208
11209         tg3_phy_stop(tp);
11210
11211         tg3_netif_stop(tp);
11212
11213         tg3_full_lock(tp, 1);
11214
11215         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11216                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11217                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11218                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11219                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11220         }
11221
11222         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11223         err = tg3_init_hw(tp, true);
11224         if (err)
11225                 goto out;
11226
11227         tg3_netif_start(tp);
11228
11229 out:
11230         tg3_full_unlock(tp);
11231
11232         if (!err)
11233                 tg3_phy_start(tp);
11234
11235         tg3_flag_clear(tp, RESET_TASK_PENDING);
11236         rtnl_unlock();
11237 }
11238
11239 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11240 {
11241         irq_handler_t fn;
11242         unsigned long flags;
11243         char *name;
11244         struct tg3_napi *tnapi = &tp->napi[irq_num];
11245
11246         if (tp->irq_cnt == 1)
11247                 name = tp->dev->name;
11248         else {
11249                 name = &tnapi->irq_lbl[0];
11250                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11251                         snprintf(name, IFNAMSIZ,
11252                                  "%s-txrx-%d", tp->dev->name, irq_num);
11253                 else if (tnapi->tx_buffers)
11254                         snprintf(name, IFNAMSIZ,
11255                                  "%s-tx-%d", tp->dev->name, irq_num);
11256                 else if (tnapi->rx_rcb)
11257                         snprintf(name, IFNAMSIZ,
11258                                  "%s-rx-%d", tp->dev->name, irq_num);
11259                 else
11260                         snprintf(name, IFNAMSIZ,
11261                                  "%s-%d", tp->dev->name, irq_num);
11262                 name[IFNAMSIZ-1] = 0;
11263         }
11264
11265         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11266                 fn = tg3_msi;
11267                 if (tg3_flag(tp, 1SHOT_MSI))
11268                         fn = tg3_msi_1shot;
11269                 flags = 0;
11270         } else {
11271                 fn = tg3_interrupt;
11272                 if (tg3_flag(tp, TAGGED_STATUS))
11273                         fn = tg3_interrupt_tagged;
11274                 flags = IRQF_SHARED;
11275         }
11276
11277         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11278 }
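/* With multiqueue MSI-X enabled, the vector names built above appear in
 * /proc/interrupts, e.g. "eth0-txrx-1" for a combined TX/RX vector,
 * "eth0-rx-2" for an RX-only vector, or "eth0-0" for the vector that
 * carries no queue ("eth0" here is a hypothetical interface name).  With
 * a single vector, the bare device name is used.
 */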
11279
11280 static int tg3_test_interrupt(struct tg3 *tp)
11281 {
11282         struct tg3_napi *tnapi = &tp->napi[0];
11283         struct net_device *dev = tp->dev;
11284         int err, i, intr_ok = 0;
11285         u32 val;
11286
11287         if (!netif_running(dev))
11288                 return -ENODEV;
11289
11290         tg3_disable_ints(tp);
11291
11292         free_irq(tnapi->irq_vec, tnapi);
11293
11294         /*
11295          * Turn off MSI one-shot mode.  Otherwise this test has no
11296          * observable way to know whether the interrupt was delivered.
11297          */
11298         if (tg3_flag(tp, 57765_PLUS)) {
11299                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11300                 tw32(MSGINT_MODE, val);
11301         }
11302
11303         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11304                           IRQF_SHARED, dev->name, tnapi);
11305         if (err)
11306                 return err;
11307
11308         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11309         tg3_enable_ints(tp);
11310
11311         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11312                tnapi->coal_now);
11313
11314         for (i = 0; i < 5; i++) {
11315                 u32 int_mbox, misc_host_ctrl;
11316
11317                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11318                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11319
11320                 if ((int_mbox != 0) ||
11321                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11322                         intr_ok = 1;
11323                         break;
11324                 }
11325
11326                 if (tg3_flag(tp, 57765_PLUS) &&
11327                     tnapi->hw_status->status_tag != tnapi->last_tag)
11328                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11329
11330                 msleep(10);
11331         }
11332
11333         tg3_disable_ints(tp);
11334
11335         free_irq(tnapi->irq_vec, tnapi);
11336
11337         err = tg3_request_irq(tp, 0);
11338
11339         if (err)
11340                 return err;
11341
11342         if (intr_ok) {
11343                 /* Re-enable MSI one-shot mode. */
11344                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11345                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11346                         tw32(MSGINT_MODE, val);
11347                 }
11348                 return 0;
11349         }
11350
11351         return -EIO;
11352 }
11353
11354 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
11355  * INTx mode is successfully restored.
11356  */
11357 static int tg3_test_msi(struct tg3 *tp)
11358 {
11359         int err;
11360         u16 pci_cmd;
11361
11362         if (!tg3_flag(tp, USING_MSI))
11363                 return 0;
11364
11365         /* Turn off SERR reporting in case MSI terminates with Master
11366          * Abort.
11367          */
11368         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11369         pci_write_config_word(tp->pdev, PCI_COMMAND,
11370                               pci_cmd & ~PCI_COMMAND_SERR);
11371
11372         err = tg3_test_interrupt(tp);
11373
11374         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11375
11376         if (!err)
11377                 return 0;
11378
11379         /* other failures */
11380         if (err != -EIO)
11381                 return err;
11382
11383         /* MSI test failed, go back to INTx mode */
11384         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11385                     "to INTx mode. Please report this failure to the PCI "
11386                     "maintainer and include system chipset information\n");
11387
11388         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11389
11390         pci_disable_msi(tp->pdev);
11391
11392         tg3_flag_clear(tp, USING_MSI);
11393         tp->napi[0].irq_vec = tp->pdev->irq;
11394
11395         err = tg3_request_irq(tp, 0);
11396         if (err)
11397                 return err;
11398
11399         /* Need to reset the chip because the MSI cycle may have terminated
11400          * with Master Abort.
11401          */
11402         tg3_full_lock(tp, 1);
11403
11404         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11405         err = tg3_init_hw(tp, true);
11406
11407         tg3_full_unlock(tp);
11408
11409         if (err)
11410                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11411
11412         return err;
11413 }
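/* The fallback above is the usual recipe for distrusting MSI on a broken
 * chipset: free the MSI vector, pci_disable_msi(), fall back to the
 * legacy pdev->irq line, re-request the handler via tg3_request_irq(),
 * and finally reset the chip in case the failed MSI cycle terminated
 * with a Master Abort.
 */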
11414
11415 static int tg3_request_firmware(struct tg3 *tp)
11416 {
11417         const struct tg3_firmware_hdr *fw_hdr;
11418
11419         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11420                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11421                            tp->fw_needed);
11422                 return -ENOENT;
11423         }
11424
11425         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11426
11427         /* Firmware blob starts with version numbers, followed by
11428          * start address and _full_ length including BSS sections
11429                  * (which must be at least as long as the actual data, of course).
11430          */
11431
11432         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11433         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11434                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11435                            tp->fw_len, tp->fw_needed);
11436                 release_firmware(tp->fw);
11437                 tp->fw = NULL;
11438                 return -EINVAL;
11439         }
11440
11441         /* We no longer need firmware; we have it. */
11442         tp->fw_needed = NULL;
11443         return 0;
11444 }
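/* For reference, the header parsed above (struct tg3_firmware_hdr in
 * tg3.h) is three big-endian 32-bit words: version, base address, and the
 * full image length including BSS.  Note that tp->fw->size counts the
 * header too, which is why TG3_FW_HDR_LEN is subtracted before comparing
 * against the declared length.
 */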
11445
11446 static u32 tg3_irq_count(struct tg3 *tp)
11447 {
11448         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11449
11450         if (irq_cnt > 1) {
11451                 /* We want as many RX rings enabled as there are CPUs.
11452                  * In multiqueue MSI-X mode, the first MSI-X vector
11453                  * only deals with link interrupts, etc., so we add
11454                  * one to the number of vectors we are requesting.
11455                  */
11456                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11457         }
11458
11459         return irq_cnt;
11460 }
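/* Worked example: with rxq_cnt = 4, txq_cnt = 1 and irq_max = 5,
 * irq_cnt = min(max(4, 1) + 1, 5) = 5 -- four queue vectors plus vector 0,
 * which only handles link and other non-queue events in multiqueue MSI-X
 * mode.  With rxq_cnt = txq_cnt = 1 the "+ 1" is skipped and a single
 * shared vector is used.
 */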
11461
11462 static bool tg3_enable_msix(struct tg3 *tp)
11463 {
11464         int i, rc;
11465         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11466
11467         tp->txq_cnt = tp->txq_req;
11468         tp->rxq_cnt = tp->rxq_req;
11469         if (!tp->rxq_cnt)
11470                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11471         if (tp->rxq_cnt > tp->rxq_max)
11472                 tp->rxq_cnt = tp->rxq_max;
11473
11474         /* Disable multiple TX rings by default.  Simple round-robin hardware
11475          * scheduling of the TX rings can cause starvation of rings with
11476          * small packets when other rings have TSO or jumbo packets.
11477          */
11478         if (!tp->txq_req)
11479                 tp->txq_cnt = 1;
11480
11481         tp->irq_cnt = tg3_irq_count(tp);
11482
11483         for (i = 0; i < tp->irq_max; i++) {
11484                 msix_ent[i].entry  = i;
11485                 msix_ent[i].vector = 0;
11486         }
11487
11488         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11489         if (rc < 0) {
11490                 return false;
11491         } else if (rc < tp->irq_cnt) {
11492                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11493                               tp->irq_cnt, rc);
11494                 tp->irq_cnt = rc;
11495                 tp->rxq_cnt = max(rc - 1, 1);
11496                 if (tp->txq_cnt)
11497                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11498         }
11499
11500         for (i = 0; i < tp->irq_max; i++)
11501                 tp->napi[i].irq_vec = msix_ent[i].vector;
11502
11503         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11504                 pci_disable_msix(tp->pdev);
11505                 return false;
11506         }
11507
11508         if (tp->irq_cnt == 1)
11509                 return true;
11510
11511         tg3_flag_set(tp, ENABLE_RSS);
11512
11513         if (tp->txq_cnt > 1)
11514                 tg3_flag_set(tp, ENABLE_TSS);
11515
11516         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11517
11518         return true;
11519 }
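/* pci_enable_msix_range(pdev, entries, minvec, maxvec) returns the number
 * of vectors actually allocated (anywhere in [minvec, maxvec]) or a
 * negative errno.  Hence the partial-allocation branch above: if 5 vectors
 * were requested but only 3 granted, irq_cnt becomes 3 and rxq_cnt shrinks
 * to max(3 - 1, 1) = 2, so queue counts still match vector counts.
 */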
11520
11521 static void tg3_ints_init(struct tg3 *tp)
11522 {
11523         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11524             !tg3_flag(tp, TAGGED_STATUS)) {
11525                 /* All MSI supporting chips should support tagged
11526                  * status.  Warn and fall back to INTx if that is not the case.
11527                  */
11528                 netdev_warn(tp->dev,
11529                             "MSI without TAGGED_STATUS? Not using MSI\n");
11530                 goto defcfg;
11531         }
11532
11533         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11534                 tg3_flag_set(tp, USING_MSIX);
11535         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11536                 tg3_flag_set(tp, USING_MSI);
11537
11538         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11539                 u32 msi_mode = tr32(MSGINT_MODE);
11540                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11541                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11542                 if (!tg3_flag(tp, 1SHOT_MSI))
11543                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11544                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11545         }
11546 defcfg:
11547         if (!tg3_flag(tp, USING_MSIX)) {
11548                 tp->irq_cnt = 1;
11549                 tp->napi[0].irq_vec = tp->pdev->irq;
11550         }
11551
11552         if (tp->irq_cnt == 1) {
11553                 tp->txq_cnt = 1;
11554                 tp->rxq_cnt = 1;
11555                 netif_set_real_num_tx_queues(tp->dev, 1);
11556                 netif_set_real_num_rx_queues(tp->dev, 1);
11557         }
11558 }
11559
11560 static void tg3_ints_fini(struct tg3 *tp)
11561 {
11562         if (tg3_flag(tp, USING_MSIX))
11563                 pci_disable_msix(tp->pdev);
11564         else if (tg3_flag(tp, USING_MSI))
11565                 pci_disable_msi(tp->pdev);
11566         tg3_flag_clear(tp, USING_MSI);
11567         tg3_flag_clear(tp, USING_MSIX);
11568         tg3_flag_clear(tp, ENABLE_RSS);
11569         tg3_flag_clear(tp, ENABLE_TSS);
11570 }
11571
11572 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11573                      bool init)
11574 {
11575         struct net_device *dev = tp->dev;
11576         int i, err;
11577
11578         /*
11579          * Set up interrupts first so we know how
11580          * many NAPI resources to allocate
11581          */
11582         tg3_ints_init(tp);
11583
11584         tg3_rss_check_indir_tbl(tp);
11585
11586         /* The placement of this call is tied
11587          * to the setup and use of Host TX descriptors.
11588          */
11589         err = tg3_alloc_consistent(tp);
11590         if (err)
11591                 goto out_ints_fini;
11592
11593         tg3_napi_init(tp);
11594
11595         tg3_napi_enable(tp);
11596
11597         for (i = 0; i < tp->irq_cnt; i++) {
11598                 err = tg3_request_irq(tp, i);
11599                 if (err) {
11600                         for (i--; i >= 0; i--) {
11601                                 struct tg3_napi *tnapi = &tp->napi[i];
11602
11603                                 free_irq(tnapi->irq_vec, tnapi);
11604                         }
11605                         goto out_napi_fini;
11606                 }
11607         }
11608
11609         tg3_full_lock(tp, 0);
11610
11611         if (init)
11612                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11613
11614         err = tg3_init_hw(tp, reset_phy);
11615         if (err) {
11616                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11617                 tg3_free_rings(tp);
11618         }
11619
11620         tg3_full_unlock(tp);
11621
11622         if (err)
11623                 goto out_free_irq;
11624
11625         if (test_irq && tg3_flag(tp, USING_MSI)) {
11626                 err = tg3_test_msi(tp);
11627
11628                 if (err) {
11629                         tg3_full_lock(tp, 0);
11630                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11631                         tg3_free_rings(tp);
11632                         tg3_full_unlock(tp);
11633
11634                         goto out_napi_fini;
11635                 }
11636
11637                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11638                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11639
11640                         tw32(PCIE_TRANSACTION_CFG,
11641                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11642                 }
11643         }
11644
11645         tg3_phy_start(tp);
11646
11647         tg3_hwmon_open(tp);
11648
11649         tg3_full_lock(tp, 0);
11650
11651         tg3_timer_start(tp);
11652         tg3_flag_set(tp, INIT_COMPLETE);
11653         tg3_enable_ints(tp);
11654
11655         tg3_ptp_resume(tp);
11656
11657         tg3_full_unlock(tp);
11658
11659         netif_tx_start_all_queues(dev);
11660
11661         /*
11662          * Reset the loopback feature if it was turned on while the device
11663          * was down; make sure that it is installed properly now.
11664          */
11665         if (dev->features & NETIF_F_LOOPBACK)
11666                 tg3_set_loopback(dev, dev->features);
11667
11668         return 0;
11669
11670 out_free_irq:
11671         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11672                 struct tg3_napi *tnapi = &tp->napi[i];
11673                 free_irq(tnapi->irq_vec, tnapi);
11674         }
11675
11676 out_napi_fini:
11677         tg3_napi_disable(tp);
11678         tg3_napi_fini(tp);
11679         tg3_free_consistent(tp);
11680
11681 out_ints_fini:
11682         tg3_ints_fini(tp);
11683
11684         return err;
11685 }
11686
11687 static void tg3_stop(struct tg3 *tp)
11688 {
11689         int i;
11690
11691         tg3_reset_task_cancel(tp);
11692         tg3_netif_stop(tp);
11693
11694         tg3_timer_stop(tp);
11695
11696         tg3_hwmon_close(tp);
11697
11698         tg3_phy_stop(tp);
11699
11700         tg3_full_lock(tp, 1);
11701
11702         tg3_disable_ints(tp);
11703
11704         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11705         tg3_free_rings(tp);
11706         tg3_flag_clear(tp, INIT_COMPLETE);
11707
11708         tg3_full_unlock(tp);
11709
11710         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11711                 struct tg3_napi *tnapi = &tp->napi[i];
11712                 free_irq(tnapi->irq_vec, tnapi);
11713         }
11714
11715         tg3_ints_fini(tp);
11716
11717         tg3_napi_fini(tp);
11718
11719         tg3_free_consistent(tp);
11720 }
11721
11722 static int tg3_open(struct net_device *dev)
11723 {
11724         struct tg3 *tp = netdev_priv(dev);
11725         int err;
11726
11727         if (tp->pcierr_recovery) {
11728                 netdev_err(dev, "Failed to open device. PCI error recovery "
11729                            "in progress\n");
11730                 return -EAGAIN;
11731         }
11732
11733         if (tp->fw_needed) {
11734                 err = tg3_request_firmware(tp);
11735                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11736                         if (err) {
11737                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11738                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11739                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11740                                 netdev_warn(tp->dev, "EEE capability restored\n");
11741                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11742                         }
11743                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11744                         if (err)
11745                                 return err;
11746                 } else if (err) {
11747                         netdev_warn(tp->dev, "TSO capability disabled\n");
11748                         tg3_flag_clear(tp, TSO_CAPABLE);
11749                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11750                         netdev_notice(tp->dev, "TSO capability restored\n");
11751                         tg3_flag_set(tp, TSO_CAPABLE);
11752                 }
11753         }
11754
11755         tg3_carrier_off(tp);
11756
11757         err = tg3_power_up(tp);
11758         if (err)
11759                 return err;
11760
11761         tg3_full_lock(tp, 0);
11762
11763         tg3_disable_ints(tp);
11764         tg3_flag_clear(tp, INIT_COMPLETE);
11765
11766         tg3_full_unlock(tp);
11767
11768         err = tg3_start(tp,
11769                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11770                         true, true);
11771         if (err) {
11772                 tg3_frob_aux_power(tp, false);
11773                 pci_set_power_state(tp->pdev, PCI_D3hot);
11774         }
11775
11776         return err;
11777 }
11778
11779 static int tg3_close(struct net_device *dev)
11780 {
11781         struct tg3 *tp = netdev_priv(dev);
11782
11783         if (tp->pcierr_recovery) {
11784                 netdev_err(dev, "Failed to close device. PCI error recovery "
11785                            "in progress\n");
11786                 return -EAGAIN;
11787         }
11788
11789         tg3_stop(tp);
11790
11791         if (pci_device_is_present(tp->pdev)) {
11792                 tg3_power_down_prepare(tp);
11793
11794                 tg3_carrier_off(tp);
11795         }
11796         return 0;
11797 }
11798
11799 static inline u64 get_stat64(tg3_stat64_t *val)
11800 {
11801         return ((u64)val->high << 32) | ((u64)val->low);
11802 }
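/* Example: a hardware counter stored as { high = 0x1, low = 0x2 } yields
 * ((u64)0x1 << 32) | 0x2 = 0x100000002.  The statistics block keeps each
 * 64-bit counter as two 32-bit halves, so this helper is used for every
 * field below.
 */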
11803
11804 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11805 {
11806         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11807
11808         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11809             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11810              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11811                 u32 val;
11812
11813                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11814                         tg3_writephy(tp, MII_TG3_TEST1,
11815                                      val | MII_TG3_TEST1_CRC_EN);
11816                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11817                 } else
11818                         val = 0;
11819
11820                 tp->phy_crc_errors += val;
11821
11822                 return tp->phy_crc_errors;
11823         }
11824
11825         return get_stat64(&hw_stats->rx_fcs_errors);
11826 }
11827
11828 #define ESTAT_ADD(member) \
11829         estats->member =        old_estats->member + \
11830                                 get_stat64(&hw_stats->member)
11831
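/* For example, ESTAT_ADD(rx_octets) expands to:
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool statistic is the total accumulated before the last
 * chip reset plus the live hardware counter.
 */
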
11832 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11833 {
11834         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11835         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11836
11837         ESTAT_ADD(rx_octets);
11838         ESTAT_ADD(rx_fragments);
11839         ESTAT_ADD(rx_ucast_packets);
11840         ESTAT_ADD(rx_mcast_packets);
11841         ESTAT_ADD(rx_bcast_packets);
11842         ESTAT_ADD(rx_fcs_errors);
11843         ESTAT_ADD(rx_align_errors);
11844         ESTAT_ADD(rx_xon_pause_rcvd);
11845         ESTAT_ADD(rx_xoff_pause_rcvd);
11846         ESTAT_ADD(rx_mac_ctrl_rcvd);
11847         ESTAT_ADD(rx_xoff_entered);
11848         ESTAT_ADD(rx_frame_too_long_errors);
11849         ESTAT_ADD(rx_jabbers);
11850         ESTAT_ADD(rx_undersize_packets);
11851         ESTAT_ADD(rx_in_length_errors);
11852         ESTAT_ADD(rx_out_length_errors);
11853         ESTAT_ADD(rx_64_or_less_octet_packets);
11854         ESTAT_ADD(rx_65_to_127_octet_packets);
11855         ESTAT_ADD(rx_128_to_255_octet_packets);
11856         ESTAT_ADD(rx_256_to_511_octet_packets);
11857         ESTAT_ADD(rx_512_to_1023_octet_packets);
11858         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11859         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11860         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11861         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11862         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11863
11864         ESTAT_ADD(tx_octets);
11865         ESTAT_ADD(tx_collisions);
11866         ESTAT_ADD(tx_xon_sent);
11867         ESTAT_ADD(tx_xoff_sent);
11868         ESTAT_ADD(tx_flow_control);
11869         ESTAT_ADD(tx_mac_errors);
11870         ESTAT_ADD(tx_single_collisions);
11871         ESTAT_ADD(tx_mult_collisions);
11872         ESTAT_ADD(tx_deferred);
11873         ESTAT_ADD(tx_excessive_collisions);
11874         ESTAT_ADD(tx_late_collisions);
11875         ESTAT_ADD(tx_collide_2times);
11876         ESTAT_ADD(tx_collide_3times);
11877         ESTAT_ADD(tx_collide_4times);
11878         ESTAT_ADD(tx_collide_5times);
11879         ESTAT_ADD(tx_collide_6times);
11880         ESTAT_ADD(tx_collide_7times);
11881         ESTAT_ADD(tx_collide_8times);
11882         ESTAT_ADD(tx_collide_9times);
11883         ESTAT_ADD(tx_collide_10times);
11884         ESTAT_ADD(tx_collide_11times);
11885         ESTAT_ADD(tx_collide_12times);
11886         ESTAT_ADD(tx_collide_13times);
11887         ESTAT_ADD(tx_collide_14times);
11888         ESTAT_ADD(tx_collide_15times);
11889         ESTAT_ADD(tx_ucast_packets);
11890         ESTAT_ADD(tx_mcast_packets);
11891         ESTAT_ADD(tx_bcast_packets);
11892         ESTAT_ADD(tx_carrier_sense_errors);
11893         ESTAT_ADD(tx_discards);
11894         ESTAT_ADD(tx_errors);
11895
11896         ESTAT_ADD(dma_writeq_full);
11897         ESTAT_ADD(dma_write_prioq_full);
11898         ESTAT_ADD(rxbds_empty);
11899         ESTAT_ADD(rx_discards);
11900         ESTAT_ADD(rx_errors);
11901         ESTAT_ADD(rx_threshold_hit);
11902
11903         ESTAT_ADD(dma_readq_full);
11904         ESTAT_ADD(dma_read_prioq_full);
11905         ESTAT_ADD(tx_comp_queue_full);
11906
11907         ESTAT_ADD(ring_set_send_prod_index);
11908         ESTAT_ADD(ring_status_update);
11909         ESTAT_ADD(nic_irqs);
11910         ESTAT_ADD(nic_avoided_irqs);
11911         ESTAT_ADD(nic_tx_threshold_hit);
11912
11913         ESTAT_ADD(mbuf_lwm_thresh_hit);
11914 }
11915
11916 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11917 {
11918         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11919         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11920
11921         stats->rx_packets = old_stats->rx_packets +
11922                 get_stat64(&hw_stats->rx_ucast_packets) +
11923                 get_stat64(&hw_stats->rx_mcast_packets) +
11924                 get_stat64(&hw_stats->rx_bcast_packets);
11925
11926         stats->tx_packets = old_stats->tx_packets +
11927                 get_stat64(&hw_stats->tx_ucast_packets) +
11928                 get_stat64(&hw_stats->tx_mcast_packets) +
11929                 get_stat64(&hw_stats->tx_bcast_packets);
11930
11931         stats->rx_bytes = old_stats->rx_bytes +
11932                 get_stat64(&hw_stats->rx_octets);
11933         stats->tx_bytes = old_stats->tx_bytes +
11934                 get_stat64(&hw_stats->tx_octets);
11935
11936         stats->rx_errors = old_stats->rx_errors +
11937                 get_stat64(&hw_stats->rx_errors);
11938         stats->tx_errors = old_stats->tx_errors +
11939                 get_stat64(&hw_stats->tx_errors) +
11940                 get_stat64(&hw_stats->tx_mac_errors) +
11941                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11942                 get_stat64(&hw_stats->tx_discards);
11943
11944         stats->multicast = old_stats->multicast +
11945                 get_stat64(&hw_stats->rx_mcast_packets);
11946         stats->collisions = old_stats->collisions +
11947                 get_stat64(&hw_stats->tx_collisions);
11948
11949         stats->rx_length_errors = old_stats->rx_length_errors +
11950                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11951                 get_stat64(&hw_stats->rx_undersize_packets);
11952
11953         stats->rx_frame_errors = old_stats->rx_frame_errors +
11954                 get_stat64(&hw_stats->rx_align_errors);
11955         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11956                 get_stat64(&hw_stats->tx_discards);
11957         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11958                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11959
11960         stats->rx_crc_errors = old_stats->rx_crc_errors +
11961                 tg3_calc_crc_errors(tp);
11962
11963         stats->rx_missed_errors = old_stats->rx_missed_errors +
11964                 get_stat64(&hw_stats->rx_discards);
11965
11966         stats->rx_dropped = tp->rx_dropped;
11967         stats->tx_dropped = tp->tx_dropped;
11968 }
11969
11970 static int tg3_get_regs_len(struct net_device *dev)
11971 {
11972         return TG3_REG_BLK_SIZE;
11973 }
11974
11975 static void tg3_get_regs(struct net_device *dev,
11976                 struct ethtool_regs *regs, void *_p)
11977 {
11978         struct tg3 *tp = netdev_priv(dev);
11979
11980         regs->version = 0;
11981
11982         memset(_p, 0, TG3_REG_BLK_SIZE);
11983
11984         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11985                 return;
11986
11987         tg3_full_lock(tp, 0);
11988
11989         tg3_dump_legacy_regs(tp, (u32 *)_p);
11990
11991         tg3_full_unlock(tp);
11992 }
11993
11994 static int tg3_get_eeprom_len(struct net_device *dev)
11995 {
11996         struct tg3 *tp = netdev_priv(dev);
11997
11998         return tp->nvram_size;
11999 }
12000
12001 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12002 {
12003         struct tg3 *tp = netdev_priv(dev);
12004         int ret, cpmu_restore = 0;
12005         u8  *pd;
12006         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12007         __be32 val;
12008
12009         if (tg3_flag(tp, NO_NVRAM))
12010                 return -EINVAL;
12011
12012         offset = eeprom->offset;
12013         len = eeprom->len;
12014         eeprom->len = 0;
12015
12016         eeprom->magic = TG3_EEPROM_MAGIC;
12017
12018         /* Override clock, link aware and link idle modes */
12019         if (tg3_flag(tp, CPMU_PRESENT)) {
12020                 cpmu_val = tr32(TG3_CPMU_CTRL);
12021                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12022                                 CPMU_CTRL_LINK_IDLE_MODE)) {
12023                         tw32(TG3_CPMU_CTRL, cpmu_val &
12024                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
12025                                              CPMU_CTRL_LINK_IDLE_MODE));
12026                         cpmu_restore = 1;
12027                 }
12028         }
12029         tg3_override_clk(tp);
12030
12031         if (offset & 3) {
12032                 /* adjustments to start on required 4 byte boundary */
12033                 b_offset = offset & 3;
12034                 b_count = 4 - b_offset;
12035                 if (b_count > len) {
12036                         /* i.e. offset=1 len=2 */
12037                         b_count = len;
12038                 }
12039                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12040                 if (ret)
12041                         goto eeprom_done;
12042                 memcpy(data, ((char *)&val) + b_offset, b_count);
12043                 len -= b_count;
12044                 offset += b_count;
12045                 eeprom->len += b_count;
12046         }
12047
12048         /* read bytes up to the last 4 byte boundary */
12049         pd = &data[eeprom->len];
12050         for (i = 0; i < (len - (len & 3)); i += 4) {
12051                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12052                 if (ret) {
12053                         if (i)
12054                                 i -= 4;
12055                         eeprom->len += i;
12056                         goto eeprom_done;
12057                 }
12058                 memcpy(pd + i, &val, 4);
12059                 if (need_resched()) {
12060                         if (signal_pending(current)) {
12061                                 eeprom->len += i;
12062                                 ret = -EINTR;
12063                                 goto eeprom_done;
12064                         }
12065                         cond_resched();
12066                 }
12067         }
12068         eeprom->len += i;
12069
12070         if (len & 3) {
12071                 /* read last bytes not ending on 4 byte boundary */
12072                 pd = &data[eeprom->len];
12073                 b_count = len & 3;
12074                 b_offset = offset + len - b_count;
12075                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12076                 if (ret)
12077                         goto eeprom_done;
12078                 memcpy(pd, &val, b_count);
12079                 eeprom->len += b_count;
12080         }
12081         ret = 0;
12082
12083 eeprom_done:
12084         /* Restore clock, link aware and link idle modes */
12085         tg3_restore_clk(tp);
12086         if (cpmu_restore)
12087                 tw32(TG3_CPMU_CTRL, cpmu_val);
12088
12089         return ret;
12090 }
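
/* Worked example of the alignment fixups in tg3_get_eeprom() above, for a
 * hypothetical request with offset = 5 and len = 10 (bytes 5..14):
 *
 *   head:   b_offset = 1, b_count = 3  -> read word at 4, copy bytes 5..7
 *   middle: one aligned word at offset 8, copy bytes 8..11
 *   tail:   b_count = 3 -> read word at 12, copy bytes 12..14
 *
 * NVRAM is only word-addressable, so every transfer is widened to 4-byte
 * reads and trimmed in memcpy().
 */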
12091
12092 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12093 {
12094         struct tg3 *tp = netdev_priv(dev);
12095         int ret;
12096         u32 offset, len, b_offset, odd_len;
12097         u8 *buf;
12098         __be32 start = 0, end;
12099
12100         if (tg3_flag(tp, NO_NVRAM) ||
12101             eeprom->magic != TG3_EEPROM_MAGIC)
12102                 return -EINVAL;
12103
12104         offset = eeprom->offset;
12105         len = eeprom->len;
12106
12107         if ((b_offset = (offset & 3))) {
12108                 /* adjustments to start on required 4 byte boundary */
12109                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12110                 if (ret)
12111                         return ret;
12112                 len += b_offset;
12113                 offset &= ~3;
12114                 if (len < 4)
12115                         len = 4;
12116         }
12117
12118         odd_len = 0;
12119         if (len & 3) {
12120                 /* adjustments to end on required 4 byte boundary */
12121                 odd_len = 1;
12122                 len = (len + 3) & ~3;
12123                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12124                 if (ret)
12125                         return ret;
12126         }
12127
12128         buf = data;
12129         if (b_offset || odd_len) {
12130                 buf = kmalloc(len, GFP_KERNEL);
12131                 if (!buf)
12132                         return -ENOMEM;
12133                 if (b_offset)
12134                         memcpy(buf, &start, 4);
12135                 if (odd_len)
12136                         memcpy(buf+len-4, &end, 4);
12137                 memcpy(buf + b_offset, data, eeprom->len);
12138         }
12139
12140         ret = tg3_nvram_write_block(tp, offset, len, buf);
12141
12142         if (buf != data)
12143                 kfree(buf);
12144
12145         return ret;
12146 }
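
/* tg3_set_eeprom() above is a read-modify-write: if the request is not
 * 4-byte aligned at either end, the bracketing words ("start" and "end")
 * are read first and merged with the caller's data in a bounce buffer, so
 * that tg3_nvram_write_block() only ever sees whole aligned words.
 */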
12147
12148 static int tg3_get_link_ksettings(struct net_device *dev,
12149                                   struct ethtool_link_ksettings *cmd)
12150 {
12151         struct tg3 *tp = netdev_priv(dev);
12152         u32 supported, advertising;
12153
12154         if (tg3_flag(tp, USE_PHYLIB)) {
12155                 struct phy_device *phydev;
12156                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12157                         return -EAGAIN;
12158                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12159                 phy_ethtool_ksettings_get(phydev, cmd);
12160
12161                 return 0;
12162         }
12163
12164         supported = (SUPPORTED_Autoneg);
12165
12166         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12167                 supported |= (SUPPORTED_1000baseT_Half |
12168                               SUPPORTED_1000baseT_Full);
12169
12170         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12171                 supported |= (SUPPORTED_100baseT_Half |
12172                               SUPPORTED_100baseT_Full |
12173                               SUPPORTED_10baseT_Half |
12174                               SUPPORTED_10baseT_Full |
12175                               SUPPORTED_TP);
12176                 cmd->base.port = PORT_TP;
12177         } else {
12178                 supported |= SUPPORTED_FIBRE;
12179                 cmd->base.port = PORT_FIBRE;
12180         }
12181         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12182                                                 supported);
12183
12184         advertising = tp->link_config.advertising;
12185         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12186                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12187                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12188                                 advertising |= ADVERTISED_Pause;
12189                         } else {
12190                                 advertising |= ADVERTISED_Pause |
12191                                         ADVERTISED_Asym_Pause;
12192                         }
12193                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12194                         advertising |= ADVERTISED_Asym_Pause;
12195                 }
12196         }
12197         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12198                                                 advertising);
12199
12200         if (netif_running(dev) && tp->link_up) {
12201                 cmd->base.speed = tp->link_config.active_speed;
12202                 cmd->base.duplex = tp->link_config.active_duplex;
12203                 ethtool_convert_legacy_u32_to_link_mode(
12204                         cmd->link_modes.lp_advertising,
12205                         tp->link_config.rmt_adv);
12206
12207                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12208                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12209                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12210                         else
12211                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12212                 }
12213         } else {
12214                 cmd->base.speed = SPEED_UNKNOWN;
12215                 cmd->base.duplex = DUPLEX_UNKNOWN;
12216                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12217         }
12218         cmd->base.phy_address = tp->phy_addr;
12219         cmd->base.autoneg = tp->link_config.autoneg;
12220         return 0;
12221 }
12222
12223 static int tg3_set_link_ksettings(struct net_device *dev,
12224                                   const struct ethtool_link_ksettings *cmd)
12225 {
12226         struct tg3 *tp = netdev_priv(dev);
12227         u32 speed = cmd->base.speed;
12228         u32 advertising;
12229
12230         if (tg3_flag(tp, USE_PHYLIB)) {
12231                 struct phy_device *phydev;
12232                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12233                         return -EAGAIN;
12234                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12235                 return phy_ethtool_ksettings_set(phydev, cmd);
12236         }
12237
12238         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12239             cmd->base.autoneg != AUTONEG_DISABLE)
12240                 return -EINVAL;
12241
12242         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12243             cmd->base.duplex != DUPLEX_FULL &&
12244             cmd->base.duplex != DUPLEX_HALF)
12245                 return -EINVAL;
12246
12247         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12248                                                 cmd->link_modes.advertising);
12249
12250         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12251                 u32 mask = ADVERTISED_Autoneg |
12252                            ADVERTISED_Pause |
12253                            ADVERTISED_Asym_Pause;
12254
12255                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12256                         mask |= ADVERTISED_1000baseT_Half |
12257                                 ADVERTISED_1000baseT_Full;
12258
12259                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12260                         mask |= ADVERTISED_100baseT_Half |
12261                                 ADVERTISED_100baseT_Full |
12262                                 ADVERTISED_10baseT_Half |
12263                                 ADVERTISED_10baseT_Full |
12264                                 ADVERTISED_TP;
12265                 else
12266                         mask |= ADVERTISED_FIBRE;
12267
12268                 if (advertising & ~mask)
12269                         return -EINVAL;
12270
12271                 mask &= (ADVERTISED_1000baseT_Half |
12272                          ADVERTISED_1000baseT_Full |
12273                          ADVERTISED_100baseT_Half |
12274                          ADVERTISED_100baseT_Full |
12275                          ADVERTISED_10baseT_Half |
12276                          ADVERTISED_10baseT_Full);
12277
12278                 advertising &= mask;
12279         } else {
12280                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12281                         if (speed != SPEED_1000)
12282                                 return -EINVAL;
12283
12284                         if (cmd->base.duplex != DUPLEX_FULL)
12285                                 return -EINVAL;
12286                 } else {
12287                         if (speed != SPEED_100 &&
12288                             speed != SPEED_10)
12289                                 return -EINVAL;
12290                 }
12291         }
12292
12293         tg3_full_lock(tp, 0);
12294
12295         tp->link_config.autoneg = cmd->base.autoneg;
12296         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12297                 tp->link_config.advertising = (advertising |
12298                                               ADVERTISED_Autoneg);
12299                 tp->link_config.speed = SPEED_UNKNOWN;
12300                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12301         } else {
12302                 tp->link_config.advertising = 0;
12303                 tp->link_config.speed = speed;
12304                 tp->link_config.duplex = cmd->base.duplex;
12305         }
12306
12307         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12308
12309         tg3_warn_mgmt_link_flap(tp);
12310
12311         if (netif_running(dev))
12312                 tg3_setup_phy(tp, true);
12313
12314         tg3_full_unlock(tp);
12315
12316         return 0;
12317 }
12318
12319 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12320 {
12321         struct tg3 *tp = netdev_priv(dev);
12322
12323         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12324         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12325         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12326         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12327 }
12328
12329 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12330 {
12331         struct tg3 *tp = netdev_priv(dev);
12332
12333         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12334                 wol->supported = WAKE_MAGIC;
12335         else
12336                 wol->supported = 0;
12337         wol->wolopts = 0;
12338         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12339                 wol->wolopts = WAKE_MAGIC;
12340         memset(&wol->sopass, 0, sizeof(wol->sopass));
12341 }
12342
12343 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12344 {
12345         struct tg3 *tp = netdev_priv(dev);
12346         struct device *dp = &tp->pdev->dev;
12347
12348         if (wol->wolopts & ~WAKE_MAGIC)
12349                 return -EINVAL;
12350         if ((wol->wolopts & WAKE_MAGIC) &&
12351             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12352                 return -EINVAL;
12353
12354         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12355
12356         if (device_may_wakeup(dp))
12357                 tg3_flag_set(tp, WOL_ENABLE);
12358         else
12359                 tg3_flag_clear(tp, WOL_ENABLE);
12360
12361         return 0;
12362 }
12363
12364 static u32 tg3_get_msglevel(struct net_device *dev)
12365 {
12366         struct tg3 *tp = netdev_priv(dev);
12367         return tp->msg_enable;
12368 }
12369
12370 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12371 {
12372         struct tg3 *tp = netdev_priv(dev);
12373         tp->msg_enable = value;
12374 }
12375
12376 static int tg3_nway_reset(struct net_device *dev)
12377 {
12378         struct tg3 *tp = netdev_priv(dev);
12379         int r;
12380
12381         if (!netif_running(dev))
12382                 return -EAGAIN;
12383
12384         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12385                 return -EINVAL;
12386
12387         tg3_warn_mgmt_link_flap(tp);
12388
12389         if (tg3_flag(tp, USE_PHYLIB)) {
12390                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12391                         return -EAGAIN;
12392                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12393         } else {
12394                 u32 bmcr;
12395
12396                 spin_lock_bh(&tp->lock);
12397                 r = -EINVAL;
12398                 tg3_readphy(tp, MII_BMCR, &bmcr);
12399                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12400                     ((bmcr & BMCR_ANENABLE) ||
12401                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12402                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12403                                                    BMCR_ANENABLE);
12404                         r = 0;
12405                 }
12406                 spin_unlock_bh(&tp->lock);
12407         }
12408
12409         return r;
12410 }
12411
12412 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12413 {
12414         struct tg3 *tp = netdev_priv(dev);
12415
12416         ering->rx_max_pending = tp->rx_std_ring_mask;
12417         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12418                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12419         else
12420                 ering->rx_jumbo_max_pending = 0;
12421
12422         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12423
12424         ering->rx_pending = tp->rx_pending;
12425         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12426                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12427         else
12428                 ering->rx_jumbo_pending = 0;
12429
12430         ering->tx_pending = tp->napi[0].tx_pending;
12431 }
12432
12433 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12434 {
12435         struct tg3 *tp = netdev_priv(dev);
12436         int i, irq_sync = 0, err = 0;
12437         bool reset_phy = false;
12438
12439         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12440             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12441             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12442             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12443             (tg3_flag(tp, TSO_BUG) &&
12444              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12445                 return -EINVAL;
12446
12447         if (netif_running(dev)) {
12448                 tg3_phy_stop(tp);
12449                 tg3_netif_stop(tp);
12450                 irq_sync = 1;
12451         }
12452
12453         tg3_full_lock(tp, irq_sync);
12454
12455         tp->rx_pending = ering->rx_pending;
12456
12457         if (tg3_flag(tp, MAX_RXPEND_64) &&
12458             tp->rx_pending > 63)
12459                 tp->rx_pending = 63;
12460
12461         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12462                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12463
12464         for (i = 0; i < tp->irq_max; i++)
12465                 tp->napi[i].tx_pending = ering->tx_pending;
12466
12467         if (netif_running(dev)) {
12468                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12469                 /* Reset PHY to avoid PHY lock-up */
12470                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12471                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12472                     tg3_asic_rev(tp) == ASIC_REV_5720)
12473                         reset_phy = true;
12474
12475                 err = tg3_restart_hw(tp, reset_phy);
12476                 if (!err)
12477                         tg3_netif_start(tp);
12478         }
12479
12480         tg3_full_unlock(tp);
12481
12482         if (irq_sync && !err)
12483                 tg3_phy_start(tp);
12484
12485         return err;
12486 }
12487
12488 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12489 {
12490         struct tg3 *tp = netdev_priv(dev);
12491
12492         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12493
12494         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12495                 epause->rx_pause = 1;
12496         else
12497                 epause->rx_pause = 0;
12498
12499         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12500                 epause->tx_pause = 1;
12501         else
12502                 epause->tx_pause = 0;
12503 }
12504
12505 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12506 {
12507         struct tg3 *tp = netdev_priv(dev);
12508         int err = 0;
12509         bool reset_phy = false;
12510
12511         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12512                 tg3_warn_mgmt_link_flap(tp);
12513
12514         if (tg3_flag(tp, USE_PHYLIB)) {
12515                 struct phy_device *phydev;
12516
12517                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12518
12519                 if (!phy_validate_pause(phydev, epause))
12520                         return -EINVAL;
12521
12522                 tp->link_config.flowctrl = 0;
12523                 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12524                 if (epause->rx_pause) {
12525                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12526
12527                         if (epause->tx_pause) {
12528                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12529                         }
12530                 } else if (epause->tx_pause) {
12531                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12532                 }
12533
12534                 if (epause->autoneg)
12535                         tg3_flag_set(tp, PAUSE_AUTONEG);
12536                 else
12537                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12538
12539                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12540                         if (phydev->autoneg) {
12541                                 /* phy_set_asym_pause() will
12542                                  * renegotiate the link to inform our
12543                                  * link partner of our flow control
12544                                  * settings, even if the flow control
12545                                  * is forced.  Let tg3_adjust_link()
12546                                  * do the final flow control setup.
12547                                  */
12548                                 return 0;
12549                         }
12550
12551                         if (!epause->autoneg)
12552                                 tg3_setup_flow_control(tp, 0, 0);
12553                 }
12554         } else {
12555                 int irq_sync = 0;
12556
12557                 if (netif_running(dev)) {
12558                         tg3_netif_stop(tp);
12559                         irq_sync = 1;
12560                 }
12561
12562                 tg3_full_lock(tp, irq_sync);
12563
12564                 if (epause->autoneg)
12565                         tg3_flag_set(tp, PAUSE_AUTONEG);
12566                 else
12567                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12568                 if (epause->rx_pause)
12569                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12570                 else
12571                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12572                 if (epause->tx_pause)
12573                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12574                 else
12575                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12576
12577                 if (netif_running(dev)) {
12578                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12579                         /* Reset PHY to avoid PHY lock-up */
12580                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12581                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12582                             tg3_asic_rev(tp) == ASIC_REV_5720)
12583                                 reset_phy = true;
12584
12585                         err = tg3_restart_hw(tp, reset_phy);
12586                         if (!err)
12587                                 tg3_netif_start(tp);
12588                 }
12589
12590                 tg3_full_unlock(tp);
12591         }
12592
12593         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12594
12595         return err;
12596 }
12597
12598 static int tg3_get_sset_count(struct net_device *dev, int sset)
12599 {
12600         switch (sset) {
12601         case ETH_SS_TEST:
12602                 return TG3_NUM_TEST;
12603         case ETH_SS_STATS:
12604                 return TG3_NUM_STATS;
12605         default:
12606                 return -EOPNOTSUPP;
12607         }
12608 }
12609
12610 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12611                          u32 *rules __always_unused)
12612 {
12613         struct tg3 *tp = netdev_priv(dev);
12614
12615         if (!tg3_flag(tp, SUPPORT_MSIX))
12616                 return -EOPNOTSUPP;
12617
12618         switch (info->cmd) {
12619         case ETHTOOL_GRXRINGS:
12620                 if (netif_running(tp->dev))
12621                         info->data = tp->rxq_cnt;
12622                 else {
12623                         info->data = num_online_cpus();
12624                         if (info->data > TG3_RSS_MAX_NUM_QS)
12625                                 info->data = TG3_RSS_MAX_NUM_QS;
12626                 }
12627
12628                 return 0;
12629
12630         default:
12631                 return -EOPNOTSUPP;
12632         }
12633 }
12634
12635 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12636 {
12637         u32 size = 0;
12638         struct tg3 *tp = netdev_priv(dev);
12639
12640         if (tg3_flag(tp, SUPPORT_MSIX))
12641                 size = TG3_RSS_INDIR_TBL_SIZE;
12642
12643         return size;
12644 }
12645
12646 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12647 {
12648         struct tg3 *tp = netdev_priv(dev);
12649         int i;
12650
12651         if (hfunc)
12652                 *hfunc = ETH_RSS_HASH_TOP;
12653         if (!indir)
12654                 return 0;
12655
12656         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12657                 indir[i] = tp->rss_ind_tbl[i];
12658
12659         return 0;
12660 }
12661
12662 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12663                         const u8 hfunc)
12664 {
12665         struct tg3 *tp = netdev_priv(dev);
12666         size_t i;
12667
12668         /* Only the indirection table may be changed here; reject any
12669          * attempt to set a hash key or a non-Toeplitz hash function.
12670          */
12671         if (key ||
12672             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12673                 return -EOPNOTSUPP;
12674
12675         if (!indir)
12676                 return 0;
12677
12678         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12679                 tp->rss_ind_tbl[i] = indir[i];
12680
12681         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12682                 return 0;
12683
12684         /* It is legal to write the indirection
12685          * table while the device is running.
12686          */
12687         tg3_full_lock(tp, 0);
12688         tg3_rss_write_indir_tbl(tp);
12689         tg3_full_unlock(tp);
12690
12691         return 0;
12692 }
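/* From userspace, these two entry points back "ethtool -x <dev>" (show
 * the RSS indirection table) and "ethtool -X <dev> ..." (rewrite it);
 * e.g. "ethtool -X eth0 equal 4" with a reasonably recent ethtool spreads
 * flows evenly over four RX rings ("eth0" is a hypothetical interface
 * name).
 */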
12693
12694 static void tg3_get_channels(struct net_device *dev,
12695                              struct ethtool_channels *channel)
12696 {
12697         struct tg3 *tp = netdev_priv(dev);
12698         u32 deflt_qs = netif_get_num_default_rss_queues();
12699
12700         channel->max_rx = tp->rxq_max;
12701         channel->max_tx = tp->txq_max;
12702
12703         if (netif_running(dev)) {
12704                 channel->rx_count = tp->rxq_cnt;
12705                 channel->tx_count = tp->txq_cnt;
12706         } else {
12707                 if (tp->rxq_req)
12708                         channel->rx_count = tp->rxq_req;
12709                 else
12710                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12711
12712                 if (tp->txq_req)
12713                         channel->tx_count = tp->txq_req;
12714                 else
12715                         channel->tx_count = min(deflt_qs, tp->txq_max);
12716         }
12717 }
12718
12719 static int tg3_set_channels(struct net_device *dev,
12720                             struct ethtool_channels *channel)
12721 {
12722         struct tg3 *tp = netdev_priv(dev);
12723
12724         if (!tg3_flag(tp, SUPPORT_MSIX))
12725                 return -EOPNOTSUPP;
12726
12727         if (channel->rx_count > tp->rxq_max ||
12728             channel->tx_count > tp->txq_max)
12729                 return -EINVAL;
12730
12731         tp->rxq_req = channel->rx_count;
12732         tp->txq_req = channel->tx_count;
12733
12734         if (!netif_running(dev))
12735                 return 0;
12736
12737         tg3_stop(tp);
12738
12739         tg3_carrier_off(tp);
12740
12741         tg3_start(tp, true, false, false);
12742
12743         return 0;
12744 }
12745
12746 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12747 {
12748         switch (stringset) {
12749         case ETH_SS_STATS:
12750                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12751                 break;
12752         case ETH_SS_TEST:
12753                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12754                 break;
12755         default:
12756                 WARN_ON(1);     /* unknown string set requested */
12757                 break;
12758         }
12759 }
12760
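/* ethtool LED identify: override the LED controls so all link LEDs
 * blink once per second, then restore the original led_ctrl value.
 */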
12761 static int tg3_set_phys_id(struct net_device *dev,
12762                             enum ethtool_phys_id_state state)
12763 {
12764         struct tg3 *tp = netdev_priv(dev);
12765
12766         switch (state) {
12767         case ETHTOOL_ID_ACTIVE:
12768                 return 1;       /* cycle on/off once per second */
12769
12770         case ETHTOOL_ID_ON:
12771                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12772                      LED_CTRL_1000MBPS_ON |
12773                      LED_CTRL_100MBPS_ON |
12774                      LED_CTRL_10MBPS_ON |
12775                      LED_CTRL_TRAFFIC_OVERRIDE |
12776                      LED_CTRL_TRAFFIC_BLINK |
12777                      LED_CTRL_TRAFFIC_LED);
12778                 break;
12779
12780         case ETHTOOL_ID_OFF:
12781                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12782                      LED_CTRL_TRAFFIC_OVERRIDE);
12783                 break;
12784
12785         case ETHTOOL_ID_INACTIVE:
12786                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12787                 break;
12788         }
12789
12790         return 0;
12791 }
12792
12793 static void tg3_get_ethtool_stats(struct net_device *dev,
12794                                    struct ethtool_stats *estats, u64 *tmp_stats)
12795 {
12796         struct tg3 *tp = netdev_priv(dev);
12797
12798         if (tp->hw_stats)
12799                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12800         else
12801                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12802 }
12803
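/* Read the VPD block for this NIC.  Legacy EEPROM images are scanned
 * for an extended-VPD directory entry; if none is found, the default
 * VPD offset and length are used.  The data is fetched through the
 * NVRAM read routines for legacy images, or through the PCI VPD
 * capability otherwise.  Returns a kmalloc()ed buffer the caller must
 * kfree(), with its length in *vpdlen, or NULL on error.
 */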
12804 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12805 {
12806         int i;
12807         __be32 *buf;
12808         u32 offset = 0, len = 0;
12809         u32 magic, val;
12810
12811         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12812                 return NULL;
12813
12814         if (magic == TG3_EEPROM_MAGIC) {
12815                 for (offset = TG3_NVM_DIR_START;
12816                      offset < TG3_NVM_DIR_END;
12817                      offset += TG3_NVM_DIRENT_SIZE) {
12818                         if (tg3_nvram_read(tp, offset, &val))
12819                                 return NULL;
12820
12821                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12822                             TG3_NVM_DIRTYPE_EXTVPD)
12823                                 break;
12824                 }
12825
12826                 if (offset != TG3_NVM_DIR_END) {
12827                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12828                         if (tg3_nvram_read(tp, offset + 4, &offset))
12829                                 return NULL;
12830
12831                         offset = tg3_nvram_logical_addr(tp, offset);
12832                 }
12833         }
12834
12835         if (!offset || !len) {
12836                 offset = TG3_NVM_VPD_OFF;
12837                 len = TG3_NVM_VPD_LEN;
12838         }
12839
12840         buf = kmalloc(len, GFP_KERNEL);
12841         if (!buf)
12842                 return NULL;
12843
12844         if (magic == TG3_EEPROM_MAGIC) {
12845                 for (i = 0; i < len; i += 4) {
12846                         /* The data is in little-endian format in NVRAM.
12847                          * Use the big-endian read routines to preserve
12848                          * the byte order as it exists in NVRAM.
12849                          */
12850                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12851                                 goto error;
12852                 }
12853         } else {
12854                 u8 *ptr;
12855                 ssize_t cnt;
12856                 unsigned int pos = 0;
12857
12858                 ptr = (u8 *)&buf[0];
12859                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12860                         cnt = pci_read_vpd(tp->pdev, pos,
12861                                            len - pos, ptr);
12862                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12863                                 cnt = 0;
12864                         else if (cnt < 0)
12865                                 goto error;
12866                 }
12867                 if (pos != len)
12868                         goto error;
12869         }
12870
12871         *vpdlen = len;
12872
12873         return buf;
12874
12875 error:
12876         kfree(buf);
12877         return NULL;
12878 }
12879
12880 #define NVRAM_TEST_SIZE 0x100
12881 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12882 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12883 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12884 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12885 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12886 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12887 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12888 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12889
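/* ethtool NVRAM self-test.  The magic word selects the image format:
 * selfboot firmware images are verified with a simple 8-bit checksum
 * (revision 2 skips the MBA word), selfboot hardware images with a
 * per-byte odd-parity check, and legacy images with CRC32 checksums
 * over the bootstrap and manufacturing blocks plus the VPD checksum
 * keyword.
 */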
12890 static int tg3_test_nvram(struct tg3 *tp)
12891 {
12892         u32 csum, magic, len;
12893         __be32 *buf;
12894         int i, j, k, err = 0, size;
12895
12896         if (tg3_flag(tp, NO_NVRAM))
12897                 return 0;
12898
12899         if (tg3_nvram_read(tp, 0, &magic) != 0)
12900                 return -EIO;
12901
12902         if (magic == TG3_EEPROM_MAGIC)
12903                 size = NVRAM_TEST_SIZE;
12904         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12905                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12906                     TG3_EEPROM_SB_FORMAT_1) {
12907                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12908                         case TG3_EEPROM_SB_REVISION_0:
12909                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12910                                 break;
12911                         case TG3_EEPROM_SB_REVISION_2:
12912                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12913                                 break;
12914                         case TG3_EEPROM_SB_REVISION_3:
12915                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12916                                 break;
12917                         case TG3_EEPROM_SB_REVISION_4:
12918                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12919                                 break;
12920                         case TG3_EEPROM_SB_REVISION_5:
12921                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12922                                 break;
12923                         case TG3_EEPROM_SB_REVISION_6:
12924                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12925                                 break;
12926                         default:
12927                                 return -EIO;
12928                         }
12929                 } else
12930                         return 0;
12931         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12932                 size = NVRAM_SELFBOOT_HW_SIZE;
12933         else
12934                 return -EIO;
12935
12936         buf = kmalloc(size, GFP_KERNEL);
12937         if (!buf)
12938                 return -ENOMEM;
12939
12940         err = -EIO;
12941         for (i = 0, j = 0; i < size; i += 4, j++) {
12942                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12943                 if (err)
12944                         break;
12945         }
12946         if (i < size)
12947                 goto out;
12948
12949         /* Selfboot format */
12950         magic = be32_to_cpu(buf[0]);
12951         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12952             TG3_EEPROM_MAGIC_FW) {
12953                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12954
12955                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12956                     TG3_EEPROM_SB_REVISION_2) {
12957                         /* For rev 2, the csum doesn't include the MBA. */
12958                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12959                                 csum8 += buf8[i];
12960                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12961                                 csum8 += buf8[i];
12962                 } else {
12963                         for (i = 0; i < size; i++)
12964                                 csum8 += buf8[i];
12965                 }
12966
12967                 if (csum8 == 0) {
12968                         err = 0;
12969                         goto out;
12970                 }
12971
12972                 err = -EIO;
12973                 goto out;
12974         }
12975
12976         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12977             TG3_EEPROM_MAGIC_HW) {
12978                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12979                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12980                 u8 *buf8 = (u8 *) buf;
12981
12982                 /* Separate the parity bits and the data bytes.  */
12983                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12984                         if ((i == 0) || (i == 8)) {
12985                                 int l;
12986                                 u8 msk;
12987
12988                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12989                                         parity[k++] = buf8[i] & msk;
12990                                 i++;
12991                         } else if (i == 16) {
12992                                 int l;
12993                                 u8 msk;
12994
12995                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12996                                         parity[k++] = buf8[i] & msk;
12997                                 i++;
12998
12999                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13000                                         parity[k++] = buf8[i] & msk;
13001                                 i++;
13002                         }
13003                         data[j++] = buf8[i];
13004                 }
13005
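                /* Verify odd parity: each data byte plus its stored
                 * parity bit must contain an odd number of set bits.
                 */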
13006                 err = -EIO;
13007                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13008                         u8 hw8 = hweight8(data[i]);
13009
13010                         if ((hw8 & 0x1) && parity[i])
13011                                 goto out;
13012                         else if (!(hw8 & 0x1) && !parity[i])
13013                                 goto out;
13014                 }
13015                 err = 0;
13016                 goto out;
13017         }
13018
13019         err = -EIO;
13020
13021         /* Bootstrap checksum at offset 0x10 */
13022         csum = calc_crc((unsigned char *) buf, 0x10);
13023         if (csum != le32_to_cpu(buf[0x10/4]))
13024                 goto out;
13025
13026         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13027         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13028         if (csum != le32_to_cpu(buf[0xfc/4]))
13029                 goto out;
13030
13031         kfree(buf);
13032
13033         buf = tg3_vpd_readblock(tp, &len);
13034         if (!buf)
13035                 return -ENOMEM;
13036
13037         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13038         if (i > 0) {
13039                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13040                 if (j < 0)
13041                         goto out;
13042
13043                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13044                         goto out;
13045
13046                 i += PCI_VPD_LRDT_TAG_SIZE;
13047                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13048                                               PCI_VPD_RO_KEYWORD_CHKSUM);
13049                 if (j > 0) {
13050                         u8 csum8 = 0;
13051
13052                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
13053
13054                         for (i = 0; i <= j; i++)
13055                                 csum8 += ((u8 *)buf)[i];
13056
13057                         if (csum8)
13058                                 goto out;
13059                 }
13060         }
13061
13062         err = 0;
13063
13064 out:
13065         kfree(buf);
13066         return err;
13067 }
13068
13069 #define TG3_SERDES_TIMEOUT_SEC  2
13070 #define TG3_COPPER_TIMEOUT_SEC  6
13071
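/* Poll the link state once per second, giving copper links more time
 * than SerDes links to come up before declaring failure.
 */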
13072 static int tg3_test_link(struct tg3 *tp)
13073 {
13074         int i, max;
13075
13076         if (!netif_running(tp->dev))
13077                 return -ENODEV;
13078
13079         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13080                 max = TG3_SERDES_TIMEOUT_SEC;
13081         else
13082                 max = TG3_COPPER_TIMEOUT_SEC;
13083
13084         for (i = 0; i < max; i++) {
13085                 if (tp->link_up)
13086                         return 0;
13087
13088                 if (msleep_interruptible(1000))
13089                         break;
13090         }
13091
13092         return -EIO;
13093 }
13094
13095 /* Only test the commonly used registers */
13096 static int tg3_test_registers(struct tg3 *tp)
13097 {
13098         int i, is_5705, is_5750;
13099         u32 offset, read_mask, write_mask, val, save_val, read_val;
13100         static struct {
13101                 u16 offset;
13102                 u16 flags;
13103 #define TG3_FL_5705     0x1
13104 #define TG3_FL_NOT_5705 0x2
13105 #define TG3_FL_NOT_5788 0x4
13106 #define TG3_FL_NOT_5750 0x8
13107                 u32 read_mask;
13108                 u32 write_mask;
13109         } reg_tbl[] = {
13110                 /* MAC Control Registers */
13111                 { MAC_MODE, TG3_FL_NOT_5705,
13112                         0x00000000, 0x00ef6f8c },
13113                 { MAC_MODE, TG3_FL_5705,
13114                         0x00000000, 0x01ef6b8c },
13115                 { MAC_STATUS, TG3_FL_NOT_5705,
13116                         0x03800107, 0x00000000 },
13117                 { MAC_STATUS, TG3_FL_5705,
13118                         0x03800100, 0x00000000 },
13119                 { MAC_ADDR_0_HIGH, 0x0000,
13120                         0x00000000, 0x0000ffff },
13121                 { MAC_ADDR_0_LOW, 0x0000,
13122                         0x00000000, 0xffffffff },
13123                 { MAC_RX_MTU_SIZE, 0x0000,
13124                         0x00000000, 0x0000ffff },
13125                 { MAC_TX_MODE, 0x0000,
13126                         0x00000000, 0x00000070 },
13127                 { MAC_TX_LENGTHS, 0x0000,
13128                         0x00000000, 0x00003fff },
13129                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13130                         0x00000000, 0x000007fc },
13131                 { MAC_RX_MODE, TG3_FL_5705,
13132                         0x00000000, 0x000007dc },
13133                 { MAC_HASH_REG_0, 0x0000,
13134                         0x00000000, 0xffffffff },
13135                 { MAC_HASH_REG_1, 0x0000,
13136                         0x00000000, 0xffffffff },
13137                 { MAC_HASH_REG_2, 0x0000,
13138                         0x00000000, 0xffffffff },
13139                 { MAC_HASH_REG_3, 0x0000,
13140                         0x00000000, 0xffffffff },
13141
13142                 /* Receive Data and Receive BD Initiator Control Registers. */
13143                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13144                         0x00000000, 0xffffffff },
13145                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13146                         0x00000000, 0xffffffff },
13147                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13148                         0x00000000, 0x00000003 },
13149                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13150                         0x00000000, 0xffffffff },
13151                 { RCVDBDI_STD_BD+0, 0x0000,
13152                         0x00000000, 0xffffffff },
13153                 { RCVDBDI_STD_BD+4, 0x0000,
13154                         0x00000000, 0xffffffff },
13155                 { RCVDBDI_STD_BD+8, 0x0000,
13156                         0x00000000, 0xffff0002 },
13157                 { RCVDBDI_STD_BD+0xc, 0x0000,
13158                         0x00000000, 0xffffffff },
13159
13160                 /* Receive BD Initiator Control Registers. */
13161                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13162                         0x00000000, 0xffffffff },
13163                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13164                         0x00000000, 0x000003ff },
13165                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13166                         0x00000000, 0xffffffff },
13167
13168                 /* Host Coalescing Control Registers. */
13169                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13170                         0x00000000, 0x00000004 },
13171                 { HOSTCC_MODE, TG3_FL_5705,
13172                         0x00000000, 0x000000f6 },
13173                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13174                         0x00000000, 0xffffffff },
13175                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13176                         0x00000000, 0x000003ff },
13177                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13178                         0x00000000, 0xffffffff },
13179                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13180                         0x00000000, 0x000003ff },
13181                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13182                         0x00000000, 0xffffffff },
13183                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13184                         0x00000000, 0x000000ff },
13185                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13186                         0x00000000, 0xffffffff },
13187                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13188                         0x00000000, 0x000000ff },
13189                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13190                         0x00000000, 0xffffffff },
13191                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13192                         0x00000000, 0xffffffff },
13193                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13194                         0x00000000, 0xffffffff },
13195                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13196                         0x00000000, 0x000000ff },
13197                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13198                         0x00000000, 0xffffffff },
13199                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13200                         0x00000000, 0x000000ff },
13201                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13202                         0x00000000, 0xffffffff },
13203                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13204                         0x00000000, 0xffffffff },
13205                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13206                         0x00000000, 0xffffffff },
13207                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13208                         0x00000000, 0xffffffff },
13209                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13210                         0x00000000, 0xffffffff },
13211                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13212                         0xffffffff, 0x00000000 },
13213                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13214                         0xffffffff, 0x00000000 },
13215
13216                 /* Buffer Manager Control Registers. */
13217                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13218                         0x00000000, 0x007fff80 },
13219                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13220                         0x00000000, 0x007fffff },
13221                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13222                         0x00000000, 0x0000003f },
13223                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13224                         0x00000000, 0x000001ff },
13225                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13226                         0x00000000, 0x000001ff },
13227                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13228                         0xffffffff, 0x00000000 },
13229                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13230                         0xffffffff, 0x00000000 },
13231
13232                 /* Mailbox Registers */
13233                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13234                         0x00000000, 0x000001ff },
13235                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13236                         0x00000000, 0x000001ff },
13237                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13238                         0x00000000, 0x000007ff },
13239                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13240                         0x00000000, 0x000001ff },
13241
13242                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13243         };
13244
13245         is_5705 = is_5750 = 0;
13246         if (tg3_flag(tp, 5705_PLUS)) {
13247                 is_5705 = 1;
13248                 if (tg3_flag(tp, 5750_PLUS))
13249                         is_5750 = 1;
13250         }
13251
13252         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13253                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13254                         continue;
13255
13256                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13257                         continue;
13258
13259                 if (tg3_flag(tp, IS_5788) &&
13260                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13261                         continue;
13262
13263                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13264                         continue;
13265
13266                 offset = (u32) reg_tbl[i].offset;
13267                 read_mask = reg_tbl[i].read_mask;
13268                 write_mask = reg_tbl[i].write_mask;
13269
13270                 /* Save the original register content */
13271                 save_val = tr32(offset);
13272
13273                 /* Determine the read-only value. */
13274                 read_val = save_val & read_mask;
13275
13276                 /* Write zero to the register, then make sure the read-only bits
13277                  * are not changed and the read/write bits are all zeros.
13278                  */
13279                 tw32(offset, 0);
13280
13281                 val = tr32(offset);
13282
13283                 /* Test the read-only and read/write bits. */
13284                 if (((val & read_mask) != read_val) || (val & write_mask))
13285                         goto out;
13286
13287                 /* Write ones to all the bits defined by RdMask and WrMask, then
13288                  * make sure the read-only bits are not changed and the
13289                  * read/write bits are all ones.
13290                  */
13291                 tw32(offset, read_mask | write_mask);
13292
13293                 val = tr32(offset);
13294
13295                 /* Test the read-only bits. */
13296                 if ((val & read_mask) != read_val)
13297                         goto out;
13298
13299                 /* Test the read/write bits. */
13300                 if ((val & write_mask) != write_mask)
13301                         goto out;
13302
13303                 tw32(offset, save_val);
13304         }
13305
13306         return 0;
13307
13308 out:
13309         if (netif_msg_hw(tp))
13310                 netdev_err(tp->dev,
13311                            "Register test failed at offset %x\n", offset);
13312         tw32(offset, save_val);
13313         return -EIO;
13314 }
13315
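/* Write each test pattern to every 32-bit word in the given internal
 * memory range and read it back to verify.
 */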
13316 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13317 {
13318         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13319         int i;
13320         u32 j;
13321
13322         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13323                 for (j = 0; j < len; j += 4) {
13324                         u32 val;
13325
13326                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13327                         tg3_read_mem(tp, offset + j, &val);
13328                         if (val != test_pattern[i])
13329                                 return -EIO;
13330                 }
13331         }
13332         return 0;
13333 }
13334
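/* Run the pattern test over the chip's internal SRAM regions; which
 * region table applies depends on the ASIC generation.
 */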
13335 static int tg3_test_memory(struct tg3 *tp)
13336 {
13337         static struct mem_entry {
13338                 u32 offset;
13339                 u32 len;
13340         } mem_tbl_570x[] = {
13341                 { 0x00000000, 0x00b50},
13342                 { 0x00002000, 0x1c000},
13343                 { 0xffffffff, 0x00000}
13344         }, mem_tbl_5705[] = {
13345                 { 0x00000100, 0x0000c},
13346                 { 0x00000200, 0x00008},
13347                 { 0x00004000, 0x00800},
13348                 { 0x00006000, 0x01000},
13349                 { 0x00008000, 0x02000},
13350                 { 0x00010000, 0x0e000},
13351                 { 0xffffffff, 0x00000}
13352         }, mem_tbl_5755[] = {
13353                 { 0x00000200, 0x00008},
13354                 { 0x00004000, 0x00800},
13355                 { 0x00006000, 0x00800},
13356                 { 0x00008000, 0x02000},
13357                 { 0x00010000, 0x0c000},
13358                 { 0xffffffff, 0x00000}
13359         }, mem_tbl_5906[] = {
13360                 { 0x00000200, 0x00008},
13361                 { 0x00004000, 0x00400},
13362                 { 0x00006000, 0x00400},
13363                 { 0x00008000, 0x01000},
13364                 { 0x00010000, 0x01000},
13365                 { 0xffffffff, 0x00000}
13366         }, mem_tbl_5717[] = {
13367                 { 0x00000200, 0x00008},
13368                 { 0x00010000, 0x0a000},
13369                 { 0x00020000, 0x13c00},
13370                 { 0xffffffff, 0x00000}
13371         }, mem_tbl_57765[] = {
13372                 { 0x00000200, 0x00008},
13373                 { 0x00004000, 0x00800},
13374                 { 0x00006000, 0x09800},
13375                 { 0x00010000, 0x0a000},
13376                 { 0xffffffff, 0x00000}
13377         };
13378         struct mem_entry *mem_tbl;
13379         int err = 0;
13380         int i;
13381
13382         if (tg3_flag(tp, 5717_PLUS))
13383                 mem_tbl = mem_tbl_5717;
13384         else if (tg3_flag(tp, 57765_CLASS) ||
13385                  tg3_asic_rev(tp) == ASIC_REV_5762)
13386                 mem_tbl = mem_tbl_57765;
13387         else if (tg3_flag(tp, 5755_PLUS))
13388                 mem_tbl = mem_tbl_5755;
13389         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13390                 mem_tbl = mem_tbl_5906;
13391         else if (tg3_flag(tp, 5705_PLUS))
13392                 mem_tbl = mem_tbl_5705;
13393         else
13394                 mem_tbl = mem_tbl_570x;
13395
13396         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13397                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13398                 if (err)
13399                         break;
13400         }
13401
13402         return err;
13403 }
13404
13405 #define TG3_TSO_MSS             500
13406
13407 #define TG3_TSO_IP_HDR_LEN      20
13408 #define TG3_TSO_TCP_HDR_LEN     20
13409 #define TG3_TSO_TCP_OPT_LEN     12
13410
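/* Template for the TSO loopback test packet: the EtherType (0x0800)
 * followed by a 20-byte IPv4 header and a 20-byte TCP header with 12
 * bytes of options.  It is copied in after the MAC addresses, and the
 * IP total length and TCP checksum fields are adjusted at run time.
 */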
13411 static const u8 tg3_tso_header[] = {
13412 0x08, 0x00,
13413 0x45, 0x00, 0x00, 0x00,
13414 0x00, 0x00, 0x40, 0x00,
13415 0x40, 0x06, 0x00, 0x00,
13416 0x0a, 0x00, 0x00, 0x01,
13417 0x0a, 0x00, 0x00, 0x02,
13418 0x0d, 0x00, 0xe0, 0x00,
13419 0x00, 0x00, 0x01, 0x00,
13420 0x00, 0x00, 0x02, 0x00,
13421 0x80, 0x10, 0x10, 0x00,
13422 0x14, 0x09, 0x00, 0x00,
13423 0x01, 0x01, 0x08, 0x0a,
13424 0x11, 0x11, 0x11, 0x11,
13425 0x11, 0x11, 0x11, 0x11,
13426 };
13427
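/* Send one test frame (or one TSO super-frame) through the currently
 * configured loopback path.  The frame is posted on the TX ring, a
 * coalescing pass is forced, and the status block is polled until the
 * TX consumer and RX producer indices show the traffic completed; the
 * received descriptors and payload bytes are then verified.
 */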
13428 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13429 {
13430         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13431         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13432         u32 budget;
13433         struct sk_buff *skb;
13434         u8 *tx_data, *rx_data;
13435         dma_addr_t map;
13436         int num_pkts, tx_len, rx_len, i, err;
13437         struct tg3_rx_buffer_desc *desc;
13438         struct tg3_napi *tnapi, *rnapi;
13439         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13440
13441         tnapi = &tp->napi[0];
13442         rnapi = &tp->napi[0];
13443         if (tp->irq_cnt > 1) {
13444                 if (tg3_flag(tp, ENABLE_RSS))
13445                         rnapi = &tp->napi[1];
13446                 if (tg3_flag(tp, ENABLE_TSS))
13447                         tnapi = &tp->napi[1];
13448         }
13449         coal_now = tnapi->coal_now | rnapi->coal_now;
13450
13451         err = -EIO;
13452
13453         tx_len = pktsz;
13454         skb = netdev_alloc_skb(tp->dev, tx_len);
13455         if (!skb)
13456                 return -ENOMEM;
13457
13458         tx_data = skb_put(skb, tx_len);
13459         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13460         memset(tx_data + ETH_ALEN, 0x0, 8);
13461
13462         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13463
13464         if (tso_loopback) {
13465                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13466
13467                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13468                               TG3_TSO_TCP_OPT_LEN;
13469
13470                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13471                        sizeof(tg3_tso_header));
13472                 mss = TG3_TSO_MSS;
13473
13474                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13475                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13476
13477                 /* Set the total length field in the IP header */
13478                 iph->tot_len = htons((u16)(mss + hdr_len));
13479
13480                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13481                               TXD_FLAG_CPU_POST_DMA);
13482
13483                 if (tg3_flag(tp, HW_TSO_1) ||
13484                     tg3_flag(tp, HW_TSO_2) ||
13485                     tg3_flag(tp, HW_TSO_3)) {
13486                         struct tcphdr *th;
13487                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13488                         th = (struct tcphdr *)&tx_data[val];
13489                         th->check = 0;
13490                 } else
13491                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13492
13493                 if (tg3_flag(tp, HW_TSO_3)) {
13494                         mss |= (hdr_len & 0xc) << 12;
13495                         if (hdr_len & 0x10)
13496                                 base_flags |= 0x00000010;
13497                         base_flags |= (hdr_len & 0x3e0) << 5;
13498                 } else if (tg3_flag(tp, HW_TSO_2)) {
13499                         mss |= hdr_len << 9;
13500                 } else if (tg3_flag(tp, HW_TSO_1) ||
13501                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13502                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13503                 } else {
13504                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13505                 }
13506
13507                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13508         } else {
13509                 num_pkts = 1;
13510                 data_off = ETH_HLEN;
13511
13512                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13513                     tx_len > VLAN_ETH_FRAME_LEN)
13514                         base_flags |= TXD_FLAG_JMB_PKT;
13515         }
13516
13517         for (i = data_off; i < tx_len; i++)
13518                 tx_data[i] = (u8) (i & 0xff);
13519
13520         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13521         if (pci_dma_mapping_error(tp->pdev, map)) {
13522                 dev_kfree_skb(skb);
13523                 return -EIO;
13524         }
13525
13526         val = tnapi->tx_prod;
13527         tnapi->tx_buffers[val].skb = skb;
13528         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13529
13530         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13531                rnapi->coal_now);
13532
13533         udelay(10);
13534
13535         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13536
13537         budget = tg3_tx_avail(tnapi);
13538         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13539                             base_flags | TXD_FLAG_END, mss, 0)) {
13540                 tnapi->tx_buffers[val].skb = NULL;
13541                 dev_kfree_skb(skb);
13542                 return -EIO;
13543         }
13544
13545         tnapi->tx_prod++;
13546
13547         /* Sync BD data before updating mailbox */
13548         wmb();
13549
13550         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13551         tr32_mailbox(tnapi->prodmbox);
13552
13553         udelay(10);
13554
13555         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13556         for (i = 0; i < 35; i++) {
13557                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13558                        coal_now);
13559
13560                 udelay(10);
13561
13562                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13563                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13564                 if ((tx_idx == tnapi->tx_prod) &&
13565                     (rx_idx == (rx_start_idx + num_pkts)))
13566                         break;
13567         }
13568
13569         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13570         dev_kfree_skb(skb);
13571
13572         if (tx_idx != tnapi->tx_prod)
13573                 goto out;
13574
13575         if (rx_idx != rx_start_idx + num_pkts)
13576                 goto out;
13577
13578         val = data_off;
13579         while (rx_idx != rx_start_idx) {
13580                 desc = &rnapi->rx_rcb[rx_start_idx++];
13581                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13582                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13583
13584                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13585                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13586                         goto out;
13587
13588                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13589                          - ETH_FCS_LEN;
13590
13591                 if (!tso_loopback) {
13592                         if (rx_len != tx_len)
13593                                 goto out;
13594
13595                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13596                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13597                                         goto out;
13598                         } else {
13599                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13600                                         goto out;
13601                         }
13602                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13603                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13604                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13605                         goto out;
13606                 }
13607
13608                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13609                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13610                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13611                                              mapping);
13612                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13613                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13614                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13615                                              mapping);
13616                 } else
13617                         goto out;
13618
13619                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13620                                             PCI_DMA_FROMDEVICE);
13621
13622                 rx_data += TG3_RX_OFFSET(tp);
13623                 for (i = data_off; i < rx_len; i++, val++) {
13624                         if (*(rx_data + i) != (u8) (val & 0xff))
13625                                 goto out;
13626                 }
13627         }
13628
13629         err = 0;
13630
13631         /* tg3_free_rings will unmap and free the rx_data */
13632 out:
13633         return err;
13634 }
13635
13636 #define TG3_STD_LOOPBACK_FAILED         1
13637 #define TG3_JMB_LOOPBACK_FAILED         2
13638 #define TG3_TSO_LOOPBACK_FAILED         4
13639 #define TG3_LOOPBACK_FAILED \
13640         (TG3_STD_LOOPBACK_FAILED | \
13641          TG3_JMB_LOOPBACK_FAILED | \
13642          TG3_TSO_LOOPBACK_FAILED)
13643
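/* Run the standard, TSO and jumbo loopback variants over up to three
 * paths: internal MAC loopback (skipped on the 5780 and on CPMU
 * devices), internal PHY loopback and, when requested, external
 * loopback.  EEE is masked off for the duration, and with RSS enabled
 * all receive traffic is steered to the first ring.
 */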
13644 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13645 {
13646         int err = -EIO;
13647         u32 eee_cap;
13648         u32 jmb_pkt_sz = 9000;
13649
13650         if (tp->dma_limit)
13651                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13652
13653         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13654         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13655
13656         if (!netif_running(tp->dev)) {
13657                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13658                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13659                 if (do_extlpbk)
13660                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13661                 goto done;
13662         }
13663
13664         err = tg3_reset_hw(tp, true);
13665         if (err) {
13666                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13667                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13668                 if (do_extlpbk)
13669                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13670                 goto done;
13671         }
13672
13673         if (tg3_flag(tp, ENABLE_RSS)) {
13674                 int i;
13675
13676                 /* Reroute all rx packets to the 1st queue */
13677                 for (i = MAC_RSS_INDIR_TBL_0;
13678                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13679                         tw32(i, 0x0);
13680         }
13681
13682         /* HW erratum: MAC loopback fails in some cases on the 5780.
13683          * Normal traffic and PHY loopback are not affected by this
13684          * erratum.  Also, the MAC loopback test is deprecated for
13685          * all newer ASIC revisions.
13686          */
13687         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13688             !tg3_flag(tp, CPMU_PRESENT)) {
13689                 tg3_mac_loopback(tp, true);
13690
13691                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13692                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13693
13694                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13695                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13696                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13697
13698                 tg3_mac_loopback(tp, false);
13699         }
13700
13701         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13702             !tg3_flag(tp, USE_PHYLIB)) {
13703                 int i;
13704
13705                 tg3_phy_lpbk_set(tp, 0, false);
13706
13707                 /* Wait for link */
13708                 for (i = 0; i < 100; i++) {
13709                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13710                                 break;
13711                         mdelay(1);
13712                 }
13713
13714                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13715                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13716                 if (tg3_flag(tp, TSO_CAPABLE) &&
13717                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13718                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13719                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13720                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13721                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13722
13723                 if (do_extlpbk) {
13724                         tg3_phy_lpbk_set(tp, 0, true);
13725
13726                         /* All link indications report up, but the hardware
13727                          * isn't really ready for about 20 msec.  Double it
13728                          * to be sure.
13729                          */
13730                         mdelay(40);
13731
13732                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13733                                 data[TG3_EXT_LOOPB_TEST] |=
13734                                                         TG3_STD_LOOPBACK_FAILED;
13735                         if (tg3_flag(tp, TSO_CAPABLE) &&
13736                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13737                                 data[TG3_EXT_LOOPB_TEST] |=
13738                                                         TG3_TSO_LOOPBACK_FAILED;
13739                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13740                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13741                                 data[TG3_EXT_LOOPB_TEST] |=
13742                                                         TG3_JMB_LOOPBACK_FAILED;
13743                 }
13744
13745                 /* Re-enable gphy autopowerdown. */
13746                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13747                         tg3_phy_toggle_apd(tp, true);
13748         }
13749
13750         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13751                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13752
13753 done:
13754         tp->phy_flags |= eee_cap;
13755
13756         return err;
13757 }
13758
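/* ethtool self-test entry point.  The NVRAM test always runs, and the
 * link test runs unless external loopback was requested; for an offline
 * test the device is halted so the register, memory, loopback and
 * interrupt tests can run, after which the hardware is restarted.
 */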
13759 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13760                           u64 *data)
13761 {
13762         struct tg3 *tp = netdev_priv(dev);
13763         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13764
13765         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13766                 if (tg3_power_up(tp)) {
13767                         etest->flags |= ETH_TEST_FL_FAILED;
13768                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13769                         return;
13770                 }
13771                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13772         }
13773
13774         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13775
13776         if (tg3_test_nvram(tp) != 0) {
13777                 etest->flags |= ETH_TEST_FL_FAILED;
13778                 data[TG3_NVRAM_TEST] = 1;
13779         }
13780         if (!doextlpbk && tg3_test_link(tp)) {
13781                 etest->flags |= ETH_TEST_FL_FAILED;
13782                 data[TG3_LINK_TEST] = 1;
13783         }
13784         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13785                 int err, err2 = 0, irq_sync = 0;
13786
13787                 if (netif_running(dev)) {
13788                         tg3_phy_stop(tp);
13789                         tg3_netif_stop(tp);
13790                         irq_sync = 1;
13791                 }
13792
13793                 tg3_full_lock(tp, irq_sync);
13794                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13795                 err = tg3_nvram_lock(tp);
13796                 tg3_halt_cpu(tp, RX_CPU_BASE);
13797                 if (!tg3_flag(tp, 5705_PLUS))
13798                         tg3_halt_cpu(tp, TX_CPU_BASE);
13799                 if (!err)
13800                         tg3_nvram_unlock(tp);
13801
13802                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13803                         tg3_phy_reset(tp);
13804
13805                 if (tg3_test_registers(tp) != 0) {
13806                         etest->flags |= ETH_TEST_FL_FAILED;
13807                         data[TG3_REGISTER_TEST] = 1;
13808                 }
13809
13810                 if (tg3_test_memory(tp) != 0) {
13811                         etest->flags |= ETH_TEST_FL_FAILED;
13812                         data[TG3_MEMORY_TEST] = 1;
13813                 }
13814
13815                 if (doextlpbk)
13816                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13817
13818                 if (tg3_test_loopback(tp, data, doextlpbk))
13819                         etest->flags |= ETH_TEST_FL_FAILED;
13820
13821                 tg3_full_unlock(tp);
13822
13823                 if (tg3_test_interrupt(tp) != 0) {
13824                         etest->flags |= ETH_TEST_FL_FAILED;
13825                         data[TG3_INTERRUPT_TEST] = 1;
13826                 }
13827
13828                 tg3_full_lock(tp, 0);
13829
13830                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13831                 if (netif_running(dev)) {
13832                         tg3_flag_set(tp, INIT_COMPLETE);
13833                         err2 = tg3_restart_hw(tp, true);
13834                         if (!err2)
13835                                 tg3_netif_start(tp);
13836                 }
13837
13838                 tg3_full_unlock(tp);
13839
13840                 if (irq_sync && !err2)
13841                         tg3_phy_start(tp);
13842         }
13843         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13844                 tg3_power_down_prepare(tp);
13845
13846 }
13847
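/* SIOCSHWTSTAMP handler: validate the user's hwtstamp_config, map the
 * requested RX filter onto TG3_RX_PTP_CTL bits and enable or disable
 * TX timestamping, then echo the accepted config back to user space.
 */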
13848 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13849 {
13850         struct tg3 *tp = netdev_priv(dev);
13851         struct hwtstamp_config stmpconf;
13852
13853         if (!tg3_flag(tp, PTP_CAPABLE))
13854                 return -EOPNOTSUPP;
13855
13856         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13857                 return -EFAULT;
13858
13859         if (stmpconf.flags)
13860                 return -EINVAL;
13861
13862         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13863             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13864                 return -ERANGE;
13865
13866         switch (stmpconf.rx_filter) {
13867         case HWTSTAMP_FILTER_NONE:
13868                 tp->rxptpctl = 0;
13869                 break;
13870         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13871                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13872                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13873                 break;
13874         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13875                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13876                                TG3_RX_PTP_CTL_SYNC_EVNT;
13877                 break;
13878         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13879                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13880                                TG3_RX_PTP_CTL_DELAY_REQ;
13881                 break;
13882         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13883                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13884                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13885                 break;
13886         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13887                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13888                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13889                 break;
13890         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13891                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13892                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13893                 break;
13894         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13895                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13896                                TG3_RX_PTP_CTL_SYNC_EVNT;
13897                 break;
13898         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13899                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13900                                TG3_RX_PTP_CTL_SYNC_EVNT;
13901                 break;
13902         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13903                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13904                                TG3_RX_PTP_CTL_SYNC_EVNT;
13905                 break;
13906         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13907                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13908                                TG3_RX_PTP_CTL_DELAY_REQ;
13909                 break;
13910         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13911                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13912                                TG3_RX_PTP_CTL_DELAY_REQ;
13913                 break;
13914         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13915                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13916                                TG3_RX_PTP_CTL_DELAY_REQ;
13917                 break;
13918         default:
13919                 return -ERANGE;
13920         }
13921
13922         if (netif_running(dev) && tp->rxptpctl)
13923                 tw32(TG3_RX_PTP_CTL,
13924                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13925
13926         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13927                 tg3_flag_set(tp, TX_TSTAMP_EN);
13928         else
13929                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13930
13931         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13932                 -EFAULT : 0;
13933 }
13934
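/* SIOCGHWTSTAMP handler: reconstruct the current hwtstamp_config from
 * the TX timestamp flag and the programmed RX filter bits.
 */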
13935 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13936 {
13937         struct tg3 *tp = netdev_priv(dev);
13938         struct hwtstamp_config stmpconf;
13939
13940         if (!tg3_flag(tp, PTP_CAPABLE))
13941                 return -EOPNOTSUPP;
13942
13943         stmpconf.flags = 0;
13944         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13945                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13946
13947         switch (tp->rxptpctl) {
13948         case 0:
13949                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13950                 break;
13951         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13952                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13953                 break;
13954         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13955                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13956                 break;
13957         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13958                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13959                 break;
13960         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13961                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13962                 break;
13963         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13964                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13965                 break;
13966         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13967                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13968                 break;
13969         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13970                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13971                 break;
13972         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13973                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13974                 break;
13975         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13976                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13977                 break;
13978         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13979                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13980                 break;
13981         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13982                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13983                 break;
13984         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13985                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13986                 break;
13987         default:
13988                 WARN_ON_ONCE(1);
13989                 return -ERANGE;
13990         }
13991
13992         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13993                 -EFAULT : 0;
13994 }
13995
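/* Main ioctl handler.  MII register access is delegated to phylib when
 * it manages the PHY; otherwise the driver's own MDIO read/write
 * routines are used under tp->lock.  Hardware timestamping ioctls are
 * dispatched to the handlers above.
 */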
13996 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13997 {
13998         struct mii_ioctl_data *data = if_mii(ifr);
13999         struct tg3 *tp = netdev_priv(dev);
14000         int err;
14001
14002         if (tg3_flag(tp, USE_PHYLIB)) {
14003                 struct phy_device *phydev;
14004                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14005                         return -EAGAIN;
14006                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14007                 return phy_mii_ioctl(phydev, ifr, cmd);
14008         }
14009
14010         switch (cmd) {
14011         case SIOCGMIIPHY:
14012                 data->phy_id = tp->phy_addr;
14013
14014                 /* fall through */
14015         case SIOCGMIIREG: {
14016                 u32 mii_regval;
14017
14018                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14019                         break;                  /* We have no PHY */
14020
14021                 if (!netif_running(dev))
14022                         return -EAGAIN;
14023
14024                 spin_lock_bh(&tp->lock);
14025                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14026                                     data->reg_num & 0x1f, &mii_regval);
14027                 spin_unlock_bh(&tp->lock);
14028
14029                 data->val_out = mii_regval;
14030
14031                 return err;
14032         }
14033
14034         case SIOCSMIIREG:
14035                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14036                         break;                  /* We have no PHY */
14037
14038                 if (!netif_running(dev))
14039                         return -EAGAIN;
14040
14041                 spin_lock_bh(&tp->lock);
14042                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14043                                      data->reg_num & 0x1f, data->val_in);
14044                 spin_unlock_bh(&tp->lock);
14045
14046                 return err;
14047
14048         case SIOCSHWTSTAMP:
14049                 return tg3_hwtstamp_set(dev, ifr);
14050
14051         case SIOCGHWTSTAMP:
14052                 return tg3_hwtstamp_get(dev, ifr);
14053
14054         default:
14055                 /* do nothing */
14056                 break;
14057         }
14058         return -EOPNOTSUPP;
14059 }
14060
14061 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14062 {
14063         struct tg3 *tp = netdev_priv(dev);
14064
14065         memcpy(ec, &tp->coal, sizeof(*ec));
14066         return 0;
14067 }
14068
14069 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14070 {
14071         struct tg3 *tp = netdev_priv(dev);
14072         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14073         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14074
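        /* Only pre-5705 hardware supports the IRQ-tick and statistics-tick
         * coalescing parameters; on 5705_PLUS the limits stay zero, so any
         * nonzero request for them is rejected below.
         */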
14075         if (!tg3_flag(tp, 5705_PLUS)) {
14076                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14077                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14078                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14079                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14080         }
14081
14082         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14083             (!ec->rx_coalesce_usecs) ||
14084             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14085             (!ec->tx_coalesce_usecs) ||
14086             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14087             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14088             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14089             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14090             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14091             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14092             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14093             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14094                 return -EINVAL;
14095
14096         /* Only copy relevant parameters, ignore all others. */
14097         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14098         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14099         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14100         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14101         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14102         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14103         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14104         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14105         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14106
14107         if (netif_running(dev)) {
14108                 tg3_full_lock(tp, 0);
14109                 __tg3_set_coalesce(tp, &tp->coal);
14110                 tg3_full_unlock(tp);
14111         }
14112         return 0;
14113 }
14114
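/* ethtool set_eee handler.  Only the LPI timer may be changed here;
 * the advertised ability mask must match what the driver derived at
 * probe time.  Applying a change requires a PHY reset, which will
 * briefly drop the link.
 */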
14115 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14116 {
14117         struct tg3 *tp = netdev_priv(dev);
14118
14119         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14120                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14121                 return -EOPNOTSUPP;
14122         }
14123
14124         if (edata->advertised != tp->eee.advertised) {
14125                 netdev_warn(tp->dev,
14126                             "Direct manipulation of EEE advertisement is not supported\n");
14127                 return -EINVAL;
14128         }
14129
14130         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14131                 netdev_warn(tp->dev,
14132                             "Maximum supported Tx LPI timer is %#x\n",
14133                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14134                 return -EINVAL;
14135         }
14136
14137         tp->eee = *edata;
14138
14139         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14140         tg3_warn_mgmt_link_flap(tp);
14141
14142         if (netif_running(tp->dev)) {
14143                 tg3_full_lock(tp, 0);
14144                 tg3_setup_eee(tp);
14145                 tg3_phy_reset(tp);
14146                 tg3_full_unlock(tp);
14147         }
14148
14149         return 0;
14150 }
14151
14152 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14153 {
14154         struct tg3 *tp = netdev_priv(dev);
14155
14156         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14157                 netdev_warn(tp->dev,
14158                             "Board does not support EEE!\n");
14159                 return -EOPNOTSUPP;
14160         }
14161
14162         *edata = tp->eee;
14163         return 0;
14164 }
14165
14166 static const struct ethtool_ops tg3_ethtool_ops = {
14167         .get_drvinfo            = tg3_get_drvinfo,
14168         .get_regs_len           = tg3_get_regs_len,
14169         .get_regs               = tg3_get_regs,
14170         .get_wol                = tg3_get_wol,
14171         .set_wol                = tg3_set_wol,
14172         .get_msglevel           = tg3_get_msglevel,
14173         .set_msglevel           = tg3_set_msglevel,
14174         .nway_reset             = tg3_nway_reset,
14175         .get_link               = ethtool_op_get_link,
14176         .get_eeprom_len         = tg3_get_eeprom_len,
14177         .get_eeprom             = tg3_get_eeprom,
14178         .set_eeprom             = tg3_set_eeprom,
14179         .get_ringparam          = tg3_get_ringparam,
14180         .set_ringparam          = tg3_set_ringparam,
14181         .get_pauseparam         = tg3_get_pauseparam,
14182         .set_pauseparam         = tg3_set_pauseparam,
14183         .self_test              = tg3_self_test,
14184         .get_strings            = tg3_get_strings,
14185         .set_phys_id            = tg3_set_phys_id,
14186         .get_ethtool_stats      = tg3_get_ethtool_stats,
14187         .get_coalesce           = tg3_get_coalesce,
14188         .set_coalesce           = tg3_set_coalesce,
14189         .get_sset_count         = tg3_get_sset_count,
14190         .get_rxnfc              = tg3_get_rxnfc,
14191         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14192         .get_rxfh               = tg3_get_rxfh,
14193         .set_rxfh               = tg3_set_rxfh,
14194         .get_channels           = tg3_get_channels,
14195         .set_channels           = tg3_set_channels,
14196         .get_ts_info            = tg3_get_ts_info,
14197         .get_eee                = tg3_get_eee,
14198         .set_eee                = tg3_set_eee,
14199         .get_link_ksettings     = tg3_get_link_ksettings,
14200         .set_link_ksettings     = tg3_set_link_ksettings,
14201 };
14202
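/* 64-bit stats snapshot, taken under tp->lock so the counters are
 * consistent.  When the hardware stats block is unavailable or the
 * device is not fully initialized, the last snapshot saved in
 * net_stats_prev is returned instead.
 */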
14203 static void tg3_get_stats64(struct net_device *dev,
14204                             struct rtnl_link_stats64 *stats)
14205 {
14206         struct tg3 *tp = netdev_priv(dev);
14207
14208         spin_lock_bh(&tp->lock);
14209         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14210                 *stats = tp->net_stats_prev;
14211                 spin_unlock_bh(&tp->lock);
14212                 return;
14213         }
14214
14215         tg3_get_nstats(tp, stats);
14216         spin_unlock_bh(&tp->lock);
14217 }
14218
14219 static void tg3_set_rx_mode(struct net_device *dev)
14220 {
14221         struct tg3 *tp = netdev_priv(dev);
14222
14223         if (!netif_running(dev))
14224                 return;
14225
14226         tg3_full_lock(tp, 0);
14227         __tg3_set_rx_mode(dev);
14228         tg3_full_unlock(tp);
14229 }
14230
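/* Apply an MTU change to the driver state.  5780-class chips cannot
 * do TSO and jumbo frames at the same time, so TSO capability is
 * toggled around the MTU update; other chips simply switch the jumbo
 * RX ring on or off.
 */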
14231 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14232                                int new_mtu)
14233 {
14234         dev->mtu = new_mtu;
14235
14236         if (new_mtu > ETH_DATA_LEN) {
14237                 if (tg3_flag(tp, 5780_CLASS)) {
14238                         netdev_update_features(dev);
14239                         tg3_flag_clear(tp, TSO_CAPABLE);
14240                 } else {
14241                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14242                 }
14243         } else {
14244                 if (tg3_flag(tp, 5780_CLASS)) {
14245                         tg3_flag_set(tp, TSO_CAPABLE);
14246                         netdev_update_features(dev);
14247                 }
14248                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14249         }
14250 }
14251
14252 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14253 {
14254         struct tg3 *tp = netdev_priv(dev);
14255         int err;
14256         bool reset_phy = false;
14257
14258         if (!netif_running(dev)) {
14259                 /* We'll just catch it later when the
14260                  * device is brought up.
14261                  */
14262                 tg3_set_mtu(dev, tp, new_mtu);
14263                 return 0;
14264         }
14265
14266         tg3_phy_stop(tp);
14267
14268         tg3_netif_stop(tp);
14269
14270         tg3_set_mtu(dev, tp, new_mtu);
14271
14272         tg3_full_lock(tp, 1);
14273
14274         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14275
14276         /* Reset the PHY, otherwise the read DMA engine will be left in a
14277          * mode that truncates all requests to 256 bytes.
14278          */
14279         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14280             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14281             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14282             tg3_asic_rev(tp) == ASIC_REV_5720)
14283                 reset_phy = true;
14284
14285         err = tg3_restart_hw(tp, reset_phy);
14286
14287         if (!err)
14288                 tg3_netif_start(tp);
14289
14290         tg3_full_unlock(tp);
14291
14292         if (!err)
14293                 tg3_phy_start(tp);
14294
14295         return err;
14296 }
14297
14298 static const struct net_device_ops tg3_netdev_ops = {
14299         .ndo_open               = tg3_open,
14300         .ndo_stop               = tg3_close,
14301         .ndo_start_xmit         = tg3_start_xmit,
14302         .ndo_get_stats64        = tg3_get_stats64,
14303         .ndo_validate_addr      = eth_validate_addr,
14304         .ndo_set_rx_mode        = tg3_set_rx_mode,
14305         .ndo_set_mac_address    = tg3_set_mac_addr,
14306         .ndo_do_ioctl           = tg3_ioctl,
14307         .ndo_tx_timeout         = tg3_tx_timeout,
14308         .ndo_change_mtu         = tg3_change_mtu,
14309         .ndo_fix_features       = tg3_fix_features,
14310         .ndo_set_features       = tg3_set_features,
14311 #ifdef CONFIG_NET_POLL_CONTROLLER
14312         .ndo_poll_controller    = tg3_poll_controller,
14313 #endif
14314 };
14315
14316 static void tg3_get_eeprom_size(struct tg3 *tp)
14317 {
14318         u32 cursize, val, magic;
14319
14320         tp->nvram_size = EEPROM_CHIP_SIZE;
14321
14322         if (tg3_nvram_read(tp, 0, &magic) != 0)
14323                 return;
14324
14325         if ((magic != TG3_EEPROM_MAGIC) &&
14326             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14327             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14328                 return;
14329
14330         /*
14331          * Size the chip by reading offsets at increasing powers of two.
14332          * When we encounter our validation signature, we know the addressing
14333          * has wrapped around, and thus have our chip size.
14334          */
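        /* Worked example (assuming the part mirrors its address space
         * at its capacity, as described above): on a 4 KiB part, reads
         * at 0x10, 0x20, ... return ordinary data until cursize reaches
         * 0x1000, where the address wraps to 0 and the magic value is
         * read back, so 0x1000 becomes the reported size.
         */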
14335         cursize = 0x10;
14336
14337         while (cursize < tp->nvram_size) {
14338                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14339                         return;
14340
14341                 if (val == magic)
14342                         break;
14343
14344                 cursize <<= 1;
14345         }
14346
14347         tp->nvram_size = cursize;
14348 }
14349
14350 static void tg3_get_nvram_size(struct tg3 *tp)
14351 {
14352         u32 val;
14353
14354         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14355                 return;
14356
14357         /* Selfboot format */
14358         if (val != TG3_EEPROM_MAGIC) {
14359                 tg3_get_eeprom_size(tp);
14360                 return;
14361         }
14362
14363         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14364                 if (val != 0) {
14365                         /* This is confusing.  We want to operate on the
14366                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14367                          * call will read from NVRAM and byteswap the data
14368                          * according to the byteswapping settings for all
14369                          * other register accesses.  This ensures the data we
14370                          * want will always reside in the lower 16-bits.
14371                          * However, the data in NVRAM is in LE format, which
14372                          * means the data from the NVRAM read will always be
14373                          * opposite the endianness of the CPU.  The 16-bit
14374                          * byteswap then brings the data to CPU endianness.
14375                          */
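                        /* Example: a stored size of 128 (0x0080 LE)
                         * arrives here byte-swapped as 0x8000;
                         * swab16() recovers 0x0080, giving
                         * 128 * 1024 bytes.
                         */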
14376                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14377                         return;
14378                 }
14379         }
14380         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14381 }
14382
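/* Decode NVRAM_CFG1 strapping into a JEDEC vendor, page size and
 * buffering mode for the original 5750/5780-class parts.  The
 * per-ASIC variants below (tg3_get_5752_nvram_info() etc.) follow the
 * same pattern with chip-specific vendor encodings.
 */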
14383 static void tg3_get_nvram_info(struct tg3 *tp)
14384 {
14385         u32 nvcfg1;
14386
14387         nvcfg1 = tr32(NVRAM_CFG1);
14388         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14389                 tg3_flag_set(tp, FLASH);
14390         } else {
14391                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14392                 tw32(NVRAM_CFG1, nvcfg1);
14393         }
14394
14395         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14396             tg3_flag(tp, 5780_CLASS)) {
14397                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14398                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14399                         tp->nvram_jedecnum = JEDEC_ATMEL;
14400                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14401                         tg3_flag_set(tp, NVRAM_BUFFERED);
14402                         break;
14403                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14404                         tp->nvram_jedecnum = JEDEC_ATMEL;
14405                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14406                         break;
14407                 case FLASH_VENDOR_ATMEL_EEPROM:
14408                         tp->nvram_jedecnum = JEDEC_ATMEL;
14409                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14410                         tg3_flag_set(tp, NVRAM_BUFFERED);
14411                         break;
14412                 case FLASH_VENDOR_ST:
14413                         tp->nvram_jedecnum = JEDEC_ST;
14414                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14415                         tg3_flag_set(tp, NVRAM_BUFFERED);
14416                         break;
14417                 case FLASH_VENDOR_SAIFUN:
14418                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14419                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14420                         break;
14421                 case FLASH_VENDOR_SST_SMALL:
14422                 case FLASH_VENDOR_SST_LARGE:
14423                         tp->nvram_jedecnum = JEDEC_SST;
14424                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14425                         break;
14426                 }
14427         } else {
14428                 tp->nvram_jedecnum = JEDEC_ATMEL;
14429                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14430                 tg3_flag_set(tp, NVRAM_BUFFERED);
14431         }
14432 }
14433
14434 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14435 {
14436         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14437         case FLASH_5752PAGE_SIZE_256:
14438                 tp->nvram_pagesize = 256;
14439                 break;
14440         case FLASH_5752PAGE_SIZE_512:
14441                 tp->nvram_pagesize = 512;
14442                 break;
14443         case FLASH_5752PAGE_SIZE_1K:
14444                 tp->nvram_pagesize = 1024;
14445                 break;
14446         case FLASH_5752PAGE_SIZE_2K:
14447                 tp->nvram_pagesize = 2048;
14448                 break;
14449         case FLASH_5752PAGE_SIZE_4K:
14450                 tp->nvram_pagesize = 4096;
14451                 break;
14452         case FLASH_5752PAGE_SIZE_264:
14453                 tp->nvram_pagesize = 264;
14454                 break;
14455         case FLASH_5752PAGE_SIZE_528:
14456                 tp->nvram_pagesize = 528;
14457                 break;
14458         }
14459 }
14460
14461 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14462 {
14463         u32 nvcfg1;
14464
14465         nvcfg1 = tr32(NVRAM_CFG1);
14466
14467         /* NVRAM protection for TPM */
14468         if (nvcfg1 & (1 << 27))
14469                 tg3_flag_set(tp, PROTECTED_NVRAM);
14470
14471         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14472         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14473         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14474                 tp->nvram_jedecnum = JEDEC_ATMEL;
14475                 tg3_flag_set(tp, NVRAM_BUFFERED);
14476                 break;
14477         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14478                 tp->nvram_jedecnum = JEDEC_ATMEL;
14479                 tg3_flag_set(tp, NVRAM_BUFFERED);
14480                 tg3_flag_set(tp, FLASH);
14481                 break;
14482         case FLASH_5752VENDOR_ST_M45PE10:
14483         case FLASH_5752VENDOR_ST_M45PE20:
14484         case FLASH_5752VENDOR_ST_M45PE40:
14485                 tp->nvram_jedecnum = JEDEC_ST;
14486                 tg3_flag_set(tp, NVRAM_BUFFERED);
14487                 tg3_flag_set(tp, FLASH);
14488                 break;
14489         }
14490
14491         if (tg3_flag(tp, FLASH)) {
14492                 tg3_nvram_get_pagesize(tp, nvcfg1);
14493         } else {
14494                 /* For eeprom, set pagesize to maximum eeprom size */
14495                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14496
14497                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14498                 tw32(NVRAM_CFG1, nvcfg1);
14499         }
14500 }
14501
14502 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14503 {
14504         u32 nvcfg1, protect = 0;
14505
14506         nvcfg1 = tr32(NVRAM_CFG1);
14507
14508         /* NVRAM protection for TPM */
14509         if (nvcfg1 & (1 << 27)) {
14510                 tg3_flag_set(tp, PROTECTED_NVRAM);
14511                 protect = 1;
14512         }
14513
14514         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14515         switch (nvcfg1) {
14516         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14517         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14518         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14519         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14520                 tp->nvram_jedecnum = JEDEC_ATMEL;
14521                 tg3_flag_set(tp, NVRAM_BUFFERED);
14522                 tg3_flag_set(tp, FLASH);
14523                 tp->nvram_pagesize = 264;
14524                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14525                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14526                         tp->nvram_size = (protect ? 0x3e200 :
14527                                           TG3_NVRAM_SIZE_512KB);
14528                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14529                         tp->nvram_size = (protect ? 0x1f200 :
14530                                           TG3_NVRAM_SIZE_256KB);
14531                 else
14532                         tp->nvram_size = (protect ? 0x1f200 :
14533                                           TG3_NVRAM_SIZE_128KB);
14534                 break;
14535         case FLASH_5752VENDOR_ST_M45PE10:
14536         case FLASH_5752VENDOR_ST_M45PE20:
14537         case FLASH_5752VENDOR_ST_M45PE40:
14538                 tp->nvram_jedecnum = JEDEC_ST;
14539                 tg3_flag_set(tp, NVRAM_BUFFERED);
14540                 tg3_flag_set(tp, FLASH);
14541                 tp->nvram_pagesize = 256;
14542                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14543                         tp->nvram_size = (protect ?
14544                                           TG3_NVRAM_SIZE_64KB :
14545                                           TG3_NVRAM_SIZE_128KB);
14546                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14547                         tp->nvram_size = (protect ?
14548                                           TG3_NVRAM_SIZE_64KB :
14549                                           TG3_NVRAM_SIZE_256KB);
14550                 else
14551                         tp->nvram_size = (protect ?
14552                                           TG3_NVRAM_SIZE_128KB :
14553                                           TG3_NVRAM_SIZE_512KB);
14554                 break;
14555         }
14556 }
14557
14558 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14559 {
14560         u32 nvcfg1;
14561
14562         nvcfg1 = tr32(NVRAM_CFG1);
14563
14564         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14565         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14566         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14567         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14568         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14569                 tp->nvram_jedecnum = JEDEC_ATMEL;
14570                 tg3_flag_set(tp, NVRAM_BUFFERED);
14571                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14572
14573                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14574                 tw32(NVRAM_CFG1, nvcfg1);
14575                 break;
14576         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14577         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14578         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14579         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14580                 tp->nvram_jedecnum = JEDEC_ATMEL;
14581                 tg3_flag_set(tp, NVRAM_BUFFERED);
14582                 tg3_flag_set(tp, FLASH);
14583                 tp->nvram_pagesize = 264;
14584                 break;
14585         case FLASH_5752VENDOR_ST_M45PE10:
14586         case FLASH_5752VENDOR_ST_M45PE20:
14587         case FLASH_5752VENDOR_ST_M45PE40:
14588                 tp->nvram_jedecnum = JEDEC_ST;
14589                 tg3_flag_set(tp, NVRAM_BUFFERED);
14590                 tg3_flag_set(tp, FLASH);
14591                 tp->nvram_pagesize = 256;
14592                 break;
14593         }
14594 }
14595
14596 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14597 {
14598         u32 nvcfg1, protect = 0;
14599
14600         nvcfg1 = tr32(NVRAM_CFG1);
14601
14602         /* NVRAM protection for TPM */
14603         if (nvcfg1 & (1 << 27)) {
14604                 tg3_flag_set(tp, PROTECTED_NVRAM);
14605                 protect = 1;
14606         }
14607
14608         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14609         switch (nvcfg1) {
14610         case FLASH_5761VENDOR_ATMEL_ADB021D:
14611         case FLASH_5761VENDOR_ATMEL_ADB041D:
14612         case FLASH_5761VENDOR_ATMEL_ADB081D:
14613         case FLASH_5761VENDOR_ATMEL_ADB161D:
14614         case FLASH_5761VENDOR_ATMEL_MDB021D:
14615         case FLASH_5761VENDOR_ATMEL_MDB041D:
14616         case FLASH_5761VENDOR_ATMEL_MDB081D:
14617         case FLASH_5761VENDOR_ATMEL_MDB161D:
14618                 tp->nvram_jedecnum = JEDEC_ATMEL;
14619                 tg3_flag_set(tp, NVRAM_BUFFERED);
14620                 tg3_flag_set(tp, FLASH);
14621                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14622                 tp->nvram_pagesize = 256;
14623                 break;
14624         case FLASH_5761VENDOR_ST_A_M45PE20:
14625         case FLASH_5761VENDOR_ST_A_M45PE40:
14626         case FLASH_5761VENDOR_ST_A_M45PE80:
14627         case FLASH_5761VENDOR_ST_A_M45PE16:
14628         case FLASH_5761VENDOR_ST_M_M45PE20:
14629         case FLASH_5761VENDOR_ST_M_M45PE40:
14630         case FLASH_5761VENDOR_ST_M_M45PE80:
14631         case FLASH_5761VENDOR_ST_M_M45PE16:
14632                 tp->nvram_jedecnum = JEDEC_ST;
14633                 tg3_flag_set(tp, NVRAM_BUFFERED);
14634                 tg3_flag_set(tp, FLASH);
14635                 tp->nvram_pagesize = 256;
14636                 break;
14637         }
14638
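        /* With TPM protection the usable size is whatever the lockout
         * register allows; otherwise it is implied by the strapped
         * part number.
         */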
14639         if (protect) {
14640                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14641         } else {
14642                 switch (nvcfg1) {
14643                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14644                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14645                 case FLASH_5761VENDOR_ST_A_M45PE16:
14646                 case FLASH_5761VENDOR_ST_M_M45PE16:
14647                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14648                         break;
14649                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14650                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14651                 case FLASH_5761VENDOR_ST_A_M45PE80:
14652                 case FLASH_5761VENDOR_ST_M_M45PE80:
14653                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14654                         break;
14655                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14656                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14657                 case FLASH_5761VENDOR_ST_A_M45PE40:
14658                 case FLASH_5761VENDOR_ST_M_M45PE40:
14659                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14660                         break;
14661                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14662                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14663                 case FLASH_5761VENDOR_ST_A_M45PE20:
14664                 case FLASH_5761VENDOR_ST_M_M45PE20:
14665                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14666                         break;
14667                 }
14668         }
14669 }
14670
14671 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14672 {
14673         tp->nvram_jedecnum = JEDEC_ATMEL;
14674         tg3_flag_set(tp, NVRAM_BUFFERED);
14675         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14676 }
14677
14678 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14679 {
14680         u32 nvcfg1;
14681
14682         nvcfg1 = tr32(NVRAM_CFG1);
14683
14684         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14685         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14686         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14687                 tp->nvram_jedecnum = JEDEC_ATMEL;
14688                 tg3_flag_set(tp, NVRAM_BUFFERED);
14689                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14690
14691                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14692                 tw32(NVRAM_CFG1, nvcfg1);
14693                 return;
14694         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14695         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14696         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14697         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14698         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14699         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14700         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14701                 tp->nvram_jedecnum = JEDEC_ATMEL;
14702                 tg3_flag_set(tp, NVRAM_BUFFERED);
14703                 tg3_flag_set(tp, FLASH);
14704
14705                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14706                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14707                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14708                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14709                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14710                         break;
14711                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14712                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14713                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14714                         break;
14715                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14716                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14717                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14718                         break;
14719                 }
14720                 break;
14721         case FLASH_5752VENDOR_ST_M45PE10:
14722         case FLASH_5752VENDOR_ST_M45PE20:
14723         case FLASH_5752VENDOR_ST_M45PE40:
14724                 tp->nvram_jedecnum = JEDEC_ST;
14725                 tg3_flag_set(tp, NVRAM_BUFFERED);
14726                 tg3_flag_set(tp, FLASH);
14727
14728                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14729                 case FLASH_5752VENDOR_ST_M45PE10:
14730                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14731                         break;
14732                 case FLASH_5752VENDOR_ST_M45PE20:
14733                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14734                         break;
14735                 case FLASH_5752VENDOR_ST_M45PE40:
14736                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14737                         break;
14738                 }
14739                 break;
14740         default:
14741                 tg3_flag_set(tp, NO_NVRAM);
14742                 return;
14743         }
14744
14745         tg3_nvram_get_pagesize(tp, nvcfg1);
14746         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14747                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14748 }
14749
14751 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14752 {
14753         u32 nvcfg1;
14754
14755         nvcfg1 = tr32(NVRAM_CFG1);
14756
14757         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14758         case FLASH_5717VENDOR_ATMEL_EEPROM:
14759         case FLASH_5717VENDOR_MICRO_EEPROM:
14760                 tp->nvram_jedecnum = JEDEC_ATMEL;
14761                 tg3_flag_set(tp, NVRAM_BUFFERED);
14762                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14763
14764                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14765                 tw32(NVRAM_CFG1, nvcfg1);
14766                 return;
14767         case FLASH_5717VENDOR_ATMEL_MDB011D:
14768         case FLASH_5717VENDOR_ATMEL_ADB011B:
14769         case FLASH_5717VENDOR_ATMEL_ADB011D:
14770         case FLASH_5717VENDOR_ATMEL_MDB021D:
14771         case FLASH_5717VENDOR_ATMEL_ADB021B:
14772         case FLASH_5717VENDOR_ATMEL_ADB021D:
14773         case FLASH_5717VENDOR_ATMEL_45USPT:
14774                 tp->nvram_jedecnum = JEDEC_ATMEL;
14775                 tg3_flag_set(tp, NVRAM_BUFFERED);
14776                 tg3_flag_set(tp, FLASH);
14777
14778                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14779                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14780                         /* Detect size with tg3_get_nvram_size() */
14781                         break;
14782                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14783                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14784                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14785                         break;
14786                 default:
14787                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14788                         break;
14789                 }
14790                 break;
14791         case FLASH_5717VENDOR_ST_M_M25PE10:
14792         case FLASH_5717VENDOR_ST_A_M25PE10:
14793         case FLASH_5717VENDOR_ST_M_M45PE10:
14794         case FLASH_5717VENDOR_ST_A_M45PE10:
14795         case FLASH_5717VENDOR_ST_M_M25PE20:
14796         case FLASH_5717VENDOR_ST_A_M25PE20:
14797         case FLASH_5717VENDOR_ST_M_M45PE20:
14798         case FLASH_5717VENDOR_ST_A_M45PE20:
14799         case FLASH_5717VENDOR_ST_25USPT:
14800         case FLASH_5717VENDOR_ST_45USPT:
14801                 tp->nvram_jedecnum = JEDEC_ST;
14802                 tg3_flag_set(tp, NVRAM_BUFFERED);
14803                 tg3_flag_set(tp, FLASH);
14804
14805                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14806                 case FLASH_5717VENDOR_ST_M_M25PE20:
14807                 case FLASH_5717VENDOR_ST_M_M45PE20:
14808                         /* Detect size with tg3_get_nvram_size() */
14809                         break;
14810                 case FLASH_5717VENDOR_ST_A_M25PE20:
14811                 case FLASH_5717VENDOR_ST_A_M45PE20:
14812                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14813                         break;
14814                 default:
14815                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14816                         break;
14817                 }
14818                 break;
14819         default:
14820                 tg3_flag_set(tp, NO_NVRAM);
14821                 return;
14822         }
14823
14824         tg3_nvram_get_pagesize(tp, nvcfg1);
14825         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14826                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14827 }
14828
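/* 5720/5762 NVRAM decode.  The 5762 can autosense its flash: the
 * device-id field of NVRAM_AUTOSENSE_STATUS encodes the part size as
 * a power of two, which is scaled up to megabytes below.
 */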
14829 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14830 {
14831         u32 nvcfg1, nvmpinstrp, nv_status;
14832
14833         nvcfg1 = tr32(NVRAM_CFG1);
14834         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14835
14836         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14837                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14838                         tg3_flag_set(tp, NO_NVRAM);
14839                         return;
14840                 }
14841
14842                 switch (nvmpinstrp) {
14843                 case FLASH_5762_MX25L_100:
14844                 case FLASH_5762_MX25L_200:
14845                 case FLASH_5762_MX25L_400:
14846                 case FLASH_5762_MX25L_800:
14847                 case FLASH_5762_MX25L_160_320:
14848                         tp->nvram_pagesize = 4096;
14849                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14850                         tg3_flag_set(tp, NVRAM_BUFFERED);
14851                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14852                         tg3_flag_set(tp, FLASH);
14853                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14854                         tp->nvram_size =
14855                                 (1 << (nv_status >> AUTOSENSE_DEVID &
14856                                                 AUTOSENSE_DEVID_MASK)
14857                                         << AUTOSENSE_SIZE_IN_MB);
14858                         return;
14859
14860                 case FLASH_5762_EEPROM_HD:
14861                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14862                         break;
14863                 case FLASH_5762_EEPROM_LD:
14864                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14865                         break;
14866                 case FLASH_5720VENDOR_M_ST_M45PE20:
14867                         /* This pinstrap supports multiple sizes, so force it
14868                          * to read the actual size from location 0xf0.
14869                          */
14870                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14871                         break;
14872                 }
14873         }
14874
14875         switch (nvmpinstrp) {
14876         case FLASH_5720_EEPROM_HD:
14877         case FLASH_5720_EEPROM_LD:
14878                 tp->nvram_jedecnum = JEDEC_ATMEL;
14879                 tg3_flag_set(tp, NVRAM_BUFFERED);
14880
14881                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14882                 tw32(NVRAM_CFG1, nvcfg1);
14883                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14884                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14885                 else
14886                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14887                 return;
14888         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14889         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14890         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14891         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14892         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14893         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14894         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14895         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14896         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14897         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14898         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14899         case FLASH_5720VENDOR_ATMEL_45USPT:
14900                 tp->nvram_jedecnum = JEDEC_ATMEL;
14901                 tg3_flag_set(tp, NVRAM_BUFFERED);
14902                 tg3_flag_set(tp, FLASH);
14903
14904                 switch (nvmpinstrp) {
14905                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14906                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14907                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14908                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14909                         break;
14910                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14911                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14912                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14913                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14914                         break;
14915                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14916                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14917                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14918                         break;
14919                 default:
14920                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14921                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14922                         break;
14923                 }
14924                 break;
14925         case FLASH_5720VENDOR_M_ST_M25PE10:
14926         case FLASH_5720VENDOR_M_ST_M45PE10:
14927         case FLASH_5720VENDOR_A_ST_M25PE10:
14928         case FLASH_5720VENDOR_A_ST_M45PE10:
14929         case FLASH_5720VENDOR_M_ST_M25PE20:
14930         case FLASH_5720VENDOR_M_ST_M45PE20:
14931         case FLASH_5720VENDOR_A_ST_M25PE20:
14932         case FLASH_5720VENDOR_A_ST_M45PE20:
14933         case FLASH_5720VENDOR_M_ST_M25PE40:
14934         case FLASH_5720VENDOR_M_ST_M45PE40:
14935         case FLASH_5720VENDOR_A_ST_M25PE40:
14936         case FLASH_5720VENDOR_A_ST_M45PE40:
14937         case FLASH_5720VENDOR_M_ST_M25PE80:
14938         case FLASH_5720VENDOR_M_ST_M45PE80:
14939         case FLASH_5720VENDOR_A_ST_M25PE80:
14940         case FLASH_5720VENDOR_A_ST_M45PE80:
14941         case FLASH_5720VENDOR_ST_25USPT:
14942         case FLASH_5720VENDOR_ST_45USPT:
14943                 tp->nvram_jedecnum = JEDEC_ST;
14944                 tg3_flag_set(tp, NVRAM_BUFFERED);
14945                 tg3_flag_set(tp, FLASH);
14946
14947                 switch (nvmpinstrp) {
14948                 case FLASH_5720VENDOR_M_ST_M25PE20:
14949                 case FLASH_5720VENDOR_M_ST_M45PE20:
14950                 case FLASH_5720VENDOR_A_ST_M25PE20:
14951                 case FLASH_5720VENDOR_A_ST_M45PE20:
14952                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14953                         break;
14954                 case FLASH_5720VENDOR_M_ST_M25PE40:
14955                 case FLASH_5720VENDOR_M_ST_M45PE40:
14956                 case FLASH_5720VENDOR_A_ST_M25PE40:
14957                 case FLASH_5720VENDOR_A_ST_M45PE40:
14958                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14959                         break;
14960                 case FLASH_5720VENDOR_M_ST_M25PE80:
14961                 case FLASH_5720VENDOR_M_ST_M45PE80:
14962                 case FLASH_5720VENDOR_A_ST_M25PE80:
14963                 case FLASH_5720VENDOR_A_ST_M45PE80:
14964                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14965                         break;
14966                 default:
14967                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14968                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14969                         break;
14970                 }
14971                 break;
14972         default:
14973                 tg3_flag_set(tp, NO_NVRAM);
14974                 return;
14975         }
14976
14977         tg3_nvram_get_pagesize(tp, nvcfg1);
14978         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14979                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14980
14981         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14982                 u32 val;
14983
14984                 if (tg3_nvram_read(tp, 0, &val))
14985                         return;
14986
14987                 if (val != TG3_EEPROM_MAGIC &&
14988                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14989                         tg3_flag_set(tp, NO_NVRAM);
14990         }
14991 }
14992
14993 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14994 static void tg3_nvram_init(struct tg3 *tp)
14995 {
14996         if (tg3_flag(tp, IS_SSB_CORE)) {
14997                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14998                 tg3_flag_clear(tp, NVRAM);
14999                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15000                 tg3_flag_set(tp, NO_NVRAM);
15001                 return;
15002         }
15003
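        /* Reset the EEPROM address state machine and program the
         * default clock period before deciding how to size the part.
         */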
15004         tw32_f(GRC_EEPROM_ADDR,
15005              (EEPROM_ADDR_FSM_RESET |
15006               (EEPROM_DEFAULT_CLOCK_PERIOD <<
15007                EEPROM_ADDR_CLKPERD_SHIFT)));
15008
15009         msleep(1);
15010
15011         /* Enable seeprom accesses. */
15012         tw32_f(GRC_LOCAL_CTRL,
15013              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15014         udelay(100);
15015
15016         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15017             tg3_asic_rev(tp) != ASIC_REV_5701) {
15018                 tg3_flag_set(tp, NVRAM);
15019
15020                 if (tg3_nvram_lock(tp)) {
15021                         netdev_warn(tp->dev,
15022                                     "Cannot get nvram lock, %s failed\n",
15023                                     __func__);
15024                         return;
15025                 }
15026                 tg3_enable_nvram_access(tp);
15027
15028                 tp->nvram_size = 0;
15029
15030                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15031                         tg3_get_5752_nvram_info(tp);
15032                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15033                         tg3_get_5755_nvram_info(tp);
15034                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15035                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
15036                          tg3_asic_rev(tp) == ASIC_REV_5785)
15037                         tg3_get_5787_nvram_info(tp);
15038                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15039                         tg3_get_5761_nvram_info(tp);
15040                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15041                         tg3_get_5906_nvram_info(tp);
15042                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15043                          tg3_flag(tp, 57765_CLASS))
15044                         tg3_get_57780_nvram_info(tp);
15045                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15046                          tg3_asic_rev(tp) == ASIC_REV_5719)
15047                         tg3_get_5717_nvram_info(tp);
15048                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15049                          tg3_asic_rev(tp) == ASIC_REV_5762)
15050                         tg3_get_5720_nvram_info(tp);
15051                 else
15052                         tg3_get_nvram_info(tp);
15053
15054                 if (tp->nvram_size == 0)
15055                         tg3_get_nvram_size(tp);
15056
15057                 tg3_disable_nvram_access(tp);
15058                 tg3_nvram_unlock(tp);
15059
15060         } else {
15061                 tg3_flag_clear(tp, NVRAM);
15062                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15063
15064                 tg3_get_eeprom_size(tp);
15065         }
15066 }
15067
15068 struct subsys_tbl_ent {
15069         u16 subsys_vendor, subsys_devid;
15070         u32 phy_id;
15071 };
15072
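/* Fallback table mapping PCI subsystem IDs to PHY IDs for boards
 * whose NVRAM does not carry a usable PHY ID; an entry of 0 denotes
 * a board without a copper PHY (serdes/fiber) in the probe code.
 */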
15073 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15074         /* Broadcom boards. */
15075         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15076           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15077         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15078           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15079         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15080           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15081         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15082           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15083         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15084           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15085         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15086           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15087         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15088           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15089         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15090           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15091         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15092           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15093         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15094           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15095         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15096           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15097
15098         /* 3com boards. */
15099         { TG3PCI_SUBVENDOR_ID_3COM,
15100           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15101         { TG3PCI_SUBVENDOR_ID_3COM,
15102           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15103         { TG3PCI_SUBVENDOR_ID_3COM,
15104           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15105         { TG3PCI_SUBVENDOR_ID_3COM,
15106           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15107         { TG3PCI_SUBVENDOR_ID_3COM,
15108           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15109
15110         /* DELL boards. */
15111         { TG3PCI_SUBVENDOR_ID_DELL,
15112           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15113         { TG3PCI_SUBVENDOR_ID_DELL,
15114           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15115         { TG3PCI_SUBVENDOR_ID_DELL,
15116           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15117         { TG3PCI_SUBVENDOR_ID_DELL,
15118           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15119
15120         /* Compaq boards. */
15121         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15122           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15123         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15124           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15125         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15126           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15127         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15128           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15129         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15130           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15131
15132         /* IBM boards. */
15133         { TG3PCI_SUBVENDOR_ID_IBM,
15134           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15135 };
15136
15137 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15138 {
15139         int i;
15140
15141         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15142                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15143                      tp->pdev->subsystem_vendor) &&
15144                     (subsys_id_to_phy_id[i].subsys_devid ==
15145                      tp->pdev->subsystem_device))
15146                         return &subsys_id_to_phy_id[i];
15147         }
15148         return NULL;
15149 }
15150
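/* Pull the bootcode-provided configuration out of NIC SRAM: PHY id
 * and type, LED mode, and WOL/ASF/APE related flags.  If the SRAM
 * signature is missing, the defaults set at the top of the function
 * are kept.
 */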
15151 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15152 {
15153         u32 val;
15154
15155         tp->phy_id = TG3_PHY_ID_INVALID;
15156         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15157
15158         /* Assume an onboard device and WOL capable by default.  */
15159         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15160         tg3_flag_set(tp, WOL_CAP);
15161
15162         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15163                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15164                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15165                         tg3_flag_set(tp, IS_NIC);
15166                 }
15167                 val = tr32(VCPU_CFGSHDW);
15168                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15169                         tg3_flag_set(tp, ASPM_WORKAROUND);
15170                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15171                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15172                         tg3_flag_set(tp, WOL_ENABLE);
15173                         device_set_wakeup_enable(&tp->pdev->dev, true);
15174                 }
15175                 goto done;
15176         }
15177
15178         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15179         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15180                 u32 nic_cfg, led_cfg;
15181                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15182                 u32 nic_phy_id, ver, eeprom_phy_id;
15183                 int eeprom_phy_serdes = 0;
15184
15185                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15186                 tp->nic_sram_data_cfg = nic_cfg;
15187
15188                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15189                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15190                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15191                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15192                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15193                     (ver > 0) && (ver < 0x100))
15194                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15195
15196                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15197                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15198
15199                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15200                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15201                     tg3_asic_rev(tp) == ASIC_REV_5720)
15202                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15203
15204                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15205                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15206                         eeprom_phy_serdes = 1;
15207
15208                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15209                 if (nic_phy_id != 0) {
15210                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15211                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15212
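                        /* Repack the two SRAM-provided halves into the
                         * driver's internal TG3_PHY_ID_* encoding.
                         */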
15213                         eeprom_phy_id  = (id1 >> 16) << 10;
15214                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15215                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15216                 } else
15217                         eeprom_phy_id = 0;
15218
15219                 tp->phy_id = eeprom_phy_id;
15220                 if (eeprom_phy_serdes) {
15221                         if (!tg3_flag(tp, 5705_PLUS))
15222                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15223                         else
15224                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15225                 }
15226
15227                 if (tg3_flag(tp, 5750_PLUS))
15228                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15229                                     SHASTA_EXT_LED_MODE_MASK);
15230                 else
15231                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15232
15233                 switch (led_cfg) {
15234                 default:
15235                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15236                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15237                         break;
15238
15239                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15240                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15241                         break;
15242
15243                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15244                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15245
15246                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15247                          * read, as on some older 5700/5701 bootcode.
15248                          */
15249                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15250                             tg3_asic_rev(tp) == ASIC_REV_5701)
15251                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15252
15253                         break;
15254
15255                 case SHASTA_EXT_LED_SHARED:
15256                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15257                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15258                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15259                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15260                                                  LED_CTRL_MODE_PHY_2);
15261
15262                         if (tg3_flag(tp, 5717_PLUS) ||
15263                             tg3_asic_rev(tp) == ASIC_REV_5762)
15264                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15265                                                 LED_CTRL_BLINK_RATE_MASK;
15266
15267                         break;
15268
15269                 case SHASTA_EXT_LED_MAC:
15270                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15271                         break;
15272
15273                 case SHASTA_EXT_LED_COMBO:
15274                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15275                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15276                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15277                                                  LED_CTRL_MODE_PHY_2);
15278                         break;
15279
15280                 }
15281
15282                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15283                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15284                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15285                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15286
15287                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15288                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15289
15290                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15291                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15292                         if ((tp->pdev->subsystem_vendor ==
15293                              PCI_VENDOR_ID_ARIMA) &&
15294                             (tp->pdev->subsystem_device == 0x205a ||
15295                              tp->pdev->subsystem_device == 0x2063))
15296                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15297                 } else {
15298                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15299                         tg3_flag_set(tp, IS_NIC);
15300                 }
15301
15302                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15303                         tg3_flag_set(tp, ENABLE_ASF);
15304                         if (tg3_flag(tp, 5750_PLUS))
15305                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15306                 }
15307
15308                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15309                     tg3_flag(tp, 5750_PLUS))
15310                         tg3_flag_set(tp, ENABLE_APE);
15311
15312                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15313                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15314                         tg3_flag_clear(tp, WOL_CAP);
15315
15316                 if (tg3_flag(tp, WOL_CAP) &&
15317                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15318                         tg3_flag_set(tp, WOL_ENABLE);
15319                         device_set_wakeup_enable(&tp->pdev->dev, true);
15320                 }
15321
15322                 if (cfg2 & (1 << 17))
15323                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15324
15325                 /* Serdes signal pre-emphasis in register 0x590 is set by
15326                  * the bootcode if bit 18 is set. */
15327                 if (cfg2 & (1 << 18))
15328                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15329
15330                 if ((tg3_flag(tp, 57765_PLUS) ||
15331                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15332                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15333                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15334                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15335
15336                 if (tg3_flag(tp, PCI_EXPRESS)) {
15337                         u32 cfg3;
15338
15339                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15340                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15341                             !tg3_flag(tp, 57765_PLUS) &&
15342                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15343                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15344                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15345                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15346                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15347                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15348                 }
15349
15350                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15351                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15352                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15353                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15354                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15355                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15356
15357                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15358                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15359         }
15360 done:
15361         if (tg3_flag(tp, WOL_CAP))
15362                 device_set_wakeup_enable(&tp->pdev->dev,
15363                                          tg3_flag(tp, WOL_ENABLE));
15364         else
15365                 device_set_wakeup_capable(&tp->pdev->dev, false);
15366 }
15367
15368 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15369 {
15370         int i, err;
15371         u32 val2, off = offset * 8;
15372
15373         err = tg3_nvram_lock(tp);
15374         if (err)
15375                 return err;
15376
15377         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15378         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15379                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15380         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15381         udelay(10);
15382
15383         for (i = 0; i < 100; i++) {
15384                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15385                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15386                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15387                         break;
15388                 }
15389                 udelay(10);
15390         }
15391
15392         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15393
15394         tg3_nvram_unlock(tp);
15395         if (val2 & APE_OTP_STATUS_CMD_DONE)
15396                 return 0;
15397
15398         return -EBUSY;
15399 }
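
/* A minimal usage sketch of the helper above (real register names, but a
 * hypothetical probe site and value):
 *
 *	u32 val;
 *
 *	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val))
 *		netdev_info(tp->dev, "OTP word: 0x%08x\n", val);
 *
 * Internally the helper takes the NVRAM lock, latches the scaled address
 * (offset * 8, presumably a bit address) into TG3_APE_OTP_ADDR with the
 * CPU-enable bit, starts a read via TG3_APE_OTP_CTRL, polls
 * TG3_APE_OTP_STATUS for CMD_DONE for up to ~1 ms, and then fetches the
 * result from TG3_APE_OTP_RD_DATA.
 */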
15400
15401 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15402 {
15403         int i;
15404         u32 val;
15405
15406         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15407         tw32(OTP_CTRL, cmd);
15408
15409         /* Wait for up to 1 ms for command to execute. */
15410         for (i = 0; i < 100; i++) {
15411                 val = tr32(OTP_STATUS);
15412                 if (val & OTP_STATUS_CMD_DONE)
15413                         break;
15414                 udelay(10);
15415         }
15416
15417         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15418 }
15419
15420 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15421  * configuration is a 32-bit value that straddles the alignment boundary.
15422  * We do two 32-bit reads and then shift and merge the results.
15423  */
15424 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15425 {
15426         u32 bhalf_otp, thalf_otp;
15427
15428         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15429
15430         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15431                 return 0;
15432
15433         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15434
15435         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15436                 return 0;
15437
15438         thalf_otp = tr32(OTP_READ_DATA);
15439
15440         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15441
15442         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15443                 return 0;
15444
15445         bhalf_otp = tr32(OTP_READ_DATA);
15446
15447         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15448 }
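
/* Worked example of the merge above, with hypothetical OTP contents: if
 * the MAGIC1 read returns thalf_otp = 0xaaaabbbb and the MAGIC2 read
 * returns bhalf_otp = 0xccccdddd, the 32-bit gphy config straddling the
 * two words is
 *
 *	((0xaaaabbbb & 0x0000ffff) << 16) | (0xccccdddd >> 16)
 *		== 0xbbbb0000 | 0x0000cccc == 0xbbbbcccc
 */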
15449
15450 static void tg3_phy_init_link_config(struct tg3 *tp)
15451 {
15452         u32 adv = ADVERTISED_Autoneg;
15453
15454         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15455                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15456                         adv |= ADVERTISED_1000baseT_Half;
15457                 adv |= ADVERTISED_1000baseT_Full;
15458         }
15459
15460         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15461                 adv |= ADVERTISED_100baseT_Half |
15462                        ADVERTISED_100baseT_Full |
15463                        ADVERTISED_10baseT_Half |
15464                        ADVERTISED_10baseT_Full |
15465                        ADVERTISED_TP;
15466         else
15467                 adv |= ADVERTISED_FIBRE;
15468
15469         tp->link_config.advertising = adv;
15470         tp->link_config.speed = SPEED_UNKNOWN;
15471         tp->link_config.duplex = DUPLEX_UNKNOWN;
15472         tp->link_config.autoneg = AUTONEG_ENABLE;
15473         tp->link_config.active_speed = SPEED_UNKNOWN;
15474         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15475
15476         tp->old_link = -1;
15477 }
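
/* For a plain copper gigabit PHY (no serdes, not 10/100-only, no 1G-half
 * restriction), the function above leaves
 *
 *	tp->link_config.advertising ==
 *		ADVERTISED_Autoneg | ADVERTISED_TP |
 *		ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
 *		ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
 *		ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full
 *
 * i.e. every twisted-pair mode is autonegotiated.
 */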
15478
15479 static int tg3_phy_probe(struct tg3 *tp)
15480 {
15481         u32 hw_phy_id_1, hw_phy_id_2;
15482         u32 hw_phy_id, hw_phy_id_masked;
15483         int err;
15484
15485         /* flow control autonegotiation is default behavior */
15486         tg3_flag_set(tp, PAUSE_AUTONEG);
15487         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15488
15489         if (tg3_flag(tp, ENABLE_APE)) {
15490                 switch (tp->pci_fn) {
15491                 case 0:
15492                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15493                         break;
15494                 case 1:
15495                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15496                         break;
15497                 case 2:
15498                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15499                         break;
15500                 case 3:
15501                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15502                         break;
15503                 }
15504         }
15505
15506         if (!tg3_flag(tp, ENABLE_ASF) &&
15507             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15508             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15509                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15510                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15511
15512         if (tg3_flag(tp, USE_PHYLIB))
15513                 return tg3_phy_init(tp);
15514
15515         /* Reading the PHY ID register can conflict with ASF
15516          * firmware access to the PHY hardware.
15517          */
15518         err = 0;
15519         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15520                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15521         } else {
15522                 /* Now read the physical PHY_ID from the chip and verify
15523                  * that it is sane.  If it doesn't look good, we fall back
15524                  * to either the hard-coded table based PHY_ID or, failing
15525                  * that, the value found in the eeprom area.
15526                  */
15527                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15528                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15529
15530                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15531                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15532                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
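                /* The three statements above repack the MII ID words into
                 * tg3's internal phy_id layout.  With hypothetical register
                 * values MII_PHYSID1 = 0x2000 and MII_PHYSID2 = 0x5c21:
                 *   (0x2000 << 10)            == 0x00800000
                 *   ((0x5c21 & 0xfc00) << 16) == 0x5c000000
                 *   (0x5c21 & 0x03ff)         == 0x00000021
                 * so hw_phy_id ends up as 0x5c800021.
                 */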
15533
15534                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15535         }
15536
15537         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15538                 tp->phy_id = hw_phy_id;
15539                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15540                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15541                 else
15542                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15543         } else {
15544                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15545                         /* Do nothing, phy ID already set up in
15546                          * tg3_get_eeprom_hw_cfg().
15547                          */
15548                 } else {
15549                         struct subsys_tbl_ent *p;
15550
15551                         /* No eeprom signature?  Try the hardcoded
15552                          * subsys device table.
15553                          */
15554                         p = tg3_lookup_by_subsys(tp);
15555                         if (p) {
15556                                 tp->phy_id = p->phy_id;
15557                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15558                                 /* So far we have seen the IDs 0xbc050cd0,
15559                                  * 0xbc050f80 and 0xbc050c30 on devices
15560                                  * connected to a BCM4785, and there are
15561                                  * probably more.  For now, just assume that
15562                                  * the PHY is supported when it is connected
15563                                  * to an SSB core.
15564                                  */
15565                                 return -ENODEV;
15566                         }
15567
15568                         if (!tp->phy_id ||
15569                             tp->phy_id == TG3_PHY_ID_BCM8002)
15570                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15571                 }
15572         }
15573
15574         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15575             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15576              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15577              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15578              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15579              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15580               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15581              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15582               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15583                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15584
15585                 tp->eee.supported = SUPPORTED_100baseT_Full |
15586                                     SUPPORTED_1000baseT_Full;
15587                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15588                                      ADVERTISED_1000baseT_Full;
15589                 tp->eee.eee_enabled = 1;
15590                 tp->eee.tx_lpi_enabled = 1;
15591                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15592         }
15593
15594         tg3_phy_init_link_config(tp);
15595
15596         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15597             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15598             !tg3_flag(tp, ENABLE_APE) &&
15599             !tg3_flag(tp, ENABLE_ASF)) {
15600                 u32 bmsr, dummy;
15601
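                /* BMSR_LSTATUS is latched low per the MII spec, so the
                 * first read below flushes any stale link-down event and
                 * the second read reflects the current link state.
                 */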
15602                 tg3_readphy(tp, MII_BMSR, &bmsr);
15603                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15604                     (bmsr & BMSR_LSTATUS))
15605                         goto skip_phy_reset;
15606
15607                 err = tg3_phy_reset(tp);
15608                 if (err)
15609                         return err;
15610
15611                 tg3_phy_set_wirespeed(tp);
15612
15613                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15614                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15615                                             tp->link_config.flowctrl);
15616
15617                         tg3_writephy(tp, MII_BMCR,
15618                                      BMCR_ANENABLE | BMCR_ANRESTART);
15619                 }
15620         }
15621
15622 skip_phy_reset:
15623         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15624                 err = tg3_init_5401phy_dsp(tp);
15625                 if (err)
15626                         return err;
15627
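                /* The DSP init is deliberately issued a second time;
                 * presumably a single pass can leave the 5401 DSP
                 * incompletely programmed (an assumption; the retry is
                 * otherwise undocumented).
                 */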
15628                 err = tg3_init_5401phy_dsp(tp);
15629         }
15630
15631         return err;
15632 }
15633
15634 static void tg3_read_vpd(struct tg3 *tp)
15635 {
15636         u8 *vpd_data;
15637         unsigned int block_end, rosize, len;
15638         u32 vpdlen;
15639         int j, i = 0;
15640
15641         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15642         if (!vpd_data)
15643                 goto out_no_vpd;
15644
15645         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15646         if (i < 0)
15647                 goto out_not_found;
15648
15649         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15650         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15651         i += PCI_VPD_LRDT_TAG_SIZE;
15652
15653         if (block_end > vpdlen)
15654                 goto out_not_found;
15655
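        /* The firmware version is only parsed for Dell OEM boards: the
         * MFR_ID keyword must spell "1028" in ASCII, i.e. Dell's PCI
         * vendor ID (PCI_VENDOR_ID_DELL == 0x1028), before the VENDOR0
         * keyword below is treated as a bootcode version string.
         */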
15656         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15657                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15658         if (j > 0) {
15659                 len = pci_vpd_info_field_size(&vpd_data[j]);
15660
15661                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15662                 if (j + len > block_end || len != 4 ||
15663                     memcmp(&vpd_data[j], "1028", 4))
15664                         goto partno;
15665
15666                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15667                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15668                 if (j < 0)
15669                         goto partno;
15670
15671                 len = pci_vpd_info_field_size(&vpd_data[j]);
15672
15673                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15674                 if (j + len > block_end)
15675                         goto partno;
15676
15677                 if (len >= sizeof(tp->fw_ver))
15678                         len = sizeof(tp->fw_ver) - 1;
15679                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15680                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15681                          &vpd_data[j]);
15682         }
15683
15684 partno:
15685         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15686                                       PCI_VPD_RO_KEYWORD_PARTNO);
15687         if (i < 0)
15688                 goto out_not_found;
15689
15690         len = pci_vpd_info_field_size(&vpd_data[i]);
15691
15692         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15693         if (len > TG3_BPN_SIZE ||
15694             (len + i) > vpdlen)
15695                 goto out_not_found;
15696
15697         memcpy(tp->board_part_number, &vpd_data[i], len);
15698
15699 out_not_found:
15700         kfree(vpd_data);
15701         if (tp->board_part_number[0])
15702                 return;
15703
15704 out_no_vpd:
15705         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15706                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15707                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15708                         strcpy(tp->board_part_number, "BCM5717");
15709                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15710                         strcpy(tp->board_part_number, "BCM5718");
15711                 else
15712                         goto nomatch;
15713         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15714                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15715                         strcpy(tp->board_part_number, "BCM57780");
15716                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15717                         strcpy(tp->board_part_number, "BCM57760");
15718                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15719                         strcpy(tp->board_part_number, "BCM57790");
15720                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15721                         strcpy(tp->board_part_number, "BCM57788");
15722                 else
15723                         goto nomatch;
15724         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15725                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15726                         strcpy(tp->board_part_number, "BCM57761");
15727                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15728                         strcpy(tp->board_part_number, "BCM57765");
15729                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15730                         strcpy(tp->board_part_number, "BCM57781");
15731                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15732                         strcpy(tp->board_part_number, "BCM57785");
15733                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15734                         strcpy(tp->board_part_number, "BCM57791");
15735                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15736                         strcpy(tp->board_part_number, "BCM57795");
15737                 else
15738                         goto nomatch;
15739         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15740                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15741                         strcpy(tp->board_part_number, "BCM57762");
15742                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15743                         strcpy(tp->board_part_number, "BCM57766");
15744                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15745                         strcpy(tp->board_part_number, "BCM57782");
15746                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15747                         strcpy(tp->board_part_number, "BCM57786");
15748                 else
15749                         goto nomatch;
15750         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15751                 strcpy(tp->board_part_number, "BCM95906");
15752         } else {
15753 nomatch:
15754                 strcpy(tp->board_part_number, "none");
15755         }
15756 }
15757
15758 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15759 {
15760         u32 val;
15761
15762         if (tg3_nvram_read(tp, offset, &val) ||
15763             (val & 0xfc000000) != 0x0c000000 ||
15764             tg3_nvram_read(tp, offset + 4, &val) ||
15765             val != 0)
15766                 return 0;
15767
15768         return 1;
15769 }
15770
15771 static void tg3_read_bc_ver(struct tg3 *tp)
15772 {
15773         u32 val, offset, start, ver_offset;
15774         int i, dst_off;
15775         bool newver = false;
15776
15777         if (tg3_nvram_read(tp, 0xc, &offset) ||
15778             tg3_nvram_read(tp, 0x4, &start))
15779                 return;
15780
15781         offset = tg3_nvram_logical_addr(tp, offset);
15782
15783         if (tg3_nvram_read(tp, offset, &val))
15784                 return;
15785
15786         if ((val & 0xfc000000) == 0x0c000000) {
15787                 if (tg3_nvram_read(tp, offset + 4, &val))
15788                         return;
15789
15790                 if (val == 0)
15791                         newver = true;
15792         }
15793
15794         dst_off = strlen(tp->fw_ver);
15795
15796         if (newver) {
15797                 if (TG3_VER_SIZE - dst_off < 16 ||
15798                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15799                         return;
15800
15801                 offset = offset + ver_offset - start;
15802                 for (i = 0; i < 16; i += 4) {
15803                         __be32 v;
15804                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15805                                 return;
15806
15807                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15808                 }
15809         } else {
15810                 u32 major, minor;
15811
15812                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15813                         return;
15814
15815                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15816                         TG3_NVM_BCVER_MAJSFT;
15817                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15818                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15819                          "v%d.%02d", major, minor);
15820         }
15821 }
15822
15823 static void tg3_read_hwsb_ver(struct tg3 *tp)
15824 {
15825         u32 val, major, minor;
15826
15827         /* Use native endian representation */
15828         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15829                 return;
15830
15831         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15832                 TG3_NVM_HWSB_CFG1_MAJSFT;
15833         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15834                 TG3_NVM_HWSB_CFG1_MINSFT;
15835
15836         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15837 }
15838
15839 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15840 {
15841         u32 offset, major, minor, build;
15842
15843         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15844
15845         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15846                 return;
15847
15848         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15849         case TG3_EEPROM_SB_REVISION_0:
15850                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15851                 break;
15852         case TG3_EEPROM_SB_REVISION_2:
15853                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15854                 break;
15855         case TG3_EEPROM_SB_REVISION_3:
15856                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15857                 break;
15858         case TG3_EEPROM_SB_REVISION_4:
15859                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15860                 break;
15861         case TG3_EEPROM_SB_REVISION_5:
15862                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15863                 break;
15864         case TG3_EEPROM_SB_REVISION_6:
15865                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15866                 break;
15867         default:
15868                 return;
15869         }
15870
15871         if (tg3_nvram_read(tp, offset, &val))
15872                 return;
15873
15874         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15875                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15876         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15877                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15878         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15879
15880         if (minor > 99 || build > 26)
15881                 return;
15882
15883         offset = strlen(tp->fw_ver);
15884         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15885                  " v%d.%02d", major, minor);
15886
15887         if (build > 0) {
15888                 offset = strlen(tp->fw_ver);
15889                 if (offset < TG3_VER_SIZE - 1)
15890                         tp->fw_ver[offset] = 'a' + build - 1;
15891         }
15892 }
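
/* The trailing build letter maps build 1..26 to 'a'..'z', which is why
 * builds above 26 were rejected earlier.  Hypothetical example: major = 1,
 * minor = 4, build = 2 appends " v1.04" and then 'b', so tp->fw_ver reads
 * "sb v1.04b".
 */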
15893
15894 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15895 {
15896         u32 val, offset, start;
15897         int i, vlen;
15898
15899         for (offset = TG3_NVM_DIR_START;
15900              offset < TG3_NVM_DIR_END;
15901              offset += TG3_NVM_DIRENT_SIZE) {
15902                 if (tg3_nvram_read(tp, offset, &val))
15903                         return;
15904
15905                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15906                         break;
15907         }
15908
15909         if (offset == TG3_NVM_DIR_END)
15910                 return;
15911
15912         if (!tg3_flag(tp, 5705_PLUS))
15913                 start = 0x08000000;
15914         else if (tg3_nvram_read(tp, offset - 4, &start))
15915                 return;
15916
15917         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15918             !tg3_fw_img_is_valid(tp, offset) ||
15919             tg3_nvram_read(tp, offset + 8, &val))
15920                 return;
15921
15922         offset += val - start;
15923
15924         vlen = strlen(tp->fw_ver);
15925
15926         tp->fw_ver[vlen++] = ',';
15927         tp->fw_ver[vlen++] = ' ';
15928
15929         for (i = 0; i < 4; i++) {
15930                 __be32 v;
15931                 if (tg3_nvram_read_be32(tp, offset, &v))
15932                         return;
15933
15934                 offset += sizeof(v);
15935
15936                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15937                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15938                         break;
15939                 }
15940
15941                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15942                 vlen += sizeof(v);
15943         }
15944 }
15945
15946 static void tg3_probe_ncsi(struct tg3 *tp)
15947 {
15948         u32 apedata;
15949
15950         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15951         if (apedata != APE_SEG_SIG_MAGIC)
15952                 return;
15953
15954         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15955         if (!(apedata & APE_FW_STATUS_READY))
15956                 return;
15957
15958         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15959                 tg3_flag_set(tp, APE_HAS_NCSI);
15960 }
15961
15962 static void tg3_read_dash_ver(struct tg3 *tp)
15963 {
15964         int vlen;
15965         u32 apedata;
15966         char *fwtype;
15967
15968         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15969
15970         if (tg3_flag(tp, APE_HAS_NCSI))
15971                 fwtype = "NCSI";
15972         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15973                 fwtype = "SMASH";
15974         else
15975                 fwtype = "DASH";
15976
15977         vlen = strlen(tp->fw_ver);
15978
15979         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15980                  fwtype,
15981                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15982                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15983                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15984                  (apedata & APE_FW_VERSION_BLDMSK));
15985 }
15986
15987 static void tg3_read_otp_ver(struct tg3 *tp)
15988 {
15989         u32 val, val2;
15990
15991         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15992                 return;
15993
15994         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15995             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15996             TG3_OTP_MAGIC0_VALID(val)) {
15997                 u64 val64 = (u64) val << 32 | val2;
15998                 u32 ver = 0;
15999                 int i, vlen;
16000
16001                 for (i = 0; i < 7; i++) {
16002                         if ((val64 & 0xff) == 0)
16003                                 break;
16004                         ver = val64 & 0xff;
16005                         val64 >>= 8;
16006                 }
16007                 vlen = strlen(tp->fw_ver);
16008                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16009         }
16010 }
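
/* The byte scan above keeps the last non-zero low byte of the OTP
 * doubleword.  Hypothetical example: val64 == 0x0000000000000512 yields
 * ver = 0x12, then ver = 0x05, then stops at the zero byte, so " .05"
 * is appended to tp->fw_ver.
 */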
16011
16012 static void tg3_read_fw_ver(struct tg3 *tp)
16013 {
16014         u32 val;
16015         bool vpd_vers = false;
16016
16017         if (tp->fw_ver[0] != 0)
16018                 vpd_vers = true;
16019
16020         if (tg3_flag(tp, NO_NVRAM)) {
16021                 strcat(tp->fw_ver, "sb");
16022                 tg3_read_otp_ver(tp);
16023                 return;
16024         }
16025
16026         if (tg3_nvram_read(tp, 0, &val))
16027                 return;
16028
16029         if (val == TG3_EEPROM_MAGIC)
16030                 tg3_read_bc_ver(tp);
16031         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16032                 tg3_read_sb_ver(tp, val);
16033         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16034                 tg3_read_hwsb_ver(tp);
16035
16036         if (tg3_flag(tp, ENABLE_ASF)) {
16037                 if (tg3_flag(tp, ENABLE_APE)) {
16038                         tg3_probe_ncsi(tp);
16039                         if (!vpd_vers)
16040                                 tg3_read_dash_ver(tp);
16041                 } else if (!vpd_vers) {
16042                         tg3_read_mgmtfw_ver(tp);
16043                 }
16044         }
16045
16046         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16047 }
16048
16049 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16050 {
16051         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16052                 return TG3_RX_RET_MAX_SIZE_5717;
16053         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16054                 return TG3_RX_RET_MAX_SIZE_5700;
16055         else
16056                 return TG3_RX_RET_MAX_SIZE_5705;
16057 }
16058
16059 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16060         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16061         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16062         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16063         { },
16064 };
16065
16066 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16067 {
16068         struct pci_dev *peer;
16069         unsigned int func, devnr = tp->pdev->devfn & ~7;
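        /* devfn packs slot and function as (slot << 3) | func, so masking
         * off the low three bits yields function 0 of our own slot.  E.g.
         * a device at devfn 0x41 (slot 8, function 1) scans devfns
         * 0x40-0x47 below looking for its sibling port.
         */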
16070
16071         for (func = 0; func < 8; func++) {
16072                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16073                 if (peer && peer != tp->pdev)
16074                         break;
16075                 pci_dev_put(peer);
16076         }
16077         /* 5704 can be configured in single-port mode, set peer to
16078          * tp->pdev in that case.
16079          */
16080         if (!peer) {
16081                 peer = tp->pdev;
16082                 return peer;
16083         }
16084
16085         /*
16086          * We don't need to keep the refcount elevated; there's no way
16087          * to remove one half of this device without removing the other.
16088          */
16089         pci_dev_put(peer);
16090
16091         return peer;
16092 }
16093
16094 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16095 {
16096         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16097         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16098                 u32 reg;
16099
16100                 /* All devices that use the alternate
16101                  * ASIC REV location have a CPMU.
16102                  */
16103                 tg3_flag_set(tp, CPMU_PRESENT);
16104
16105                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16106                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16107                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16108                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16109                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16110                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16111                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16112                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16113                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16114                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16115                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16116                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16117                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16118                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16119                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16120                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16121                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16122                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16123                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16124                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16125                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16126                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16127                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16128                 else
16129                         reg = TG3PCI_PRODID_ASICREV;
16130
16131                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16132         }
16133
16134         /* Wrong chip ID in 5752 A0. This code can be removed later
16135          * as A0 is not in production.
16136          */
16137         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16138                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16139
16140         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16141                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16142
16143         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16144             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16145             tg3_asic_rev(tp) == ASIC_REV_5720)
16146                 tg3_flag_set(tp, 5717_PLUS);
16147
16148         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16149             tg3_asic_rev(tp) == ASIC_REV_57766)
16150                 tg3_flag_set(tp, 57765_CLASS);
16151
16152         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16153              tg3_asic_rev(tp) == ASIC_REV_5762)
16154                 tg3_flag_set(tp, 57765_PLUS);
16155
16156         /* Intentionally exclude ASIC_REV_5906 */
16157         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16158             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16159             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16160             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16161             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16162             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16163             tg3_flag(tp, 57765_PLUS))
16164                 tg3_flag_set(tp, 5755_PLUS);
16165
16166         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16167             tg3_asic_rev(tp) == ASIC_REV_5714)
16168                 tg3_flag_set(tp, 5780_CLASS);
16169
16170         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16171             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16172             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16173             tg3_flag(tp, 5755_PLUS) ||
16174             tg3_flag(tp, 5780_CLASS))
16175                 tg3_flag_set(tp, 5750_PLUS);
16176
16177         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16178             tg3_flag(tp, 5750_PLUS))
16179                 tg3_flag_set(tp, 5705_PLUS);
16180 }
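
/* The generation flags set above form an inclusion chain:
 *
 *	57765_PLUS => 5755_PLUS => 5750_PLUS => 5705_PLUS
 *
 * with 5717_PLUS, 57765_CLASS and 5780_CLASS feeding into it, so a check
 * such as tg3_flag(tp, 5705_PLUS) also matches every later generation.
 */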
16181
16182 static bool tg3_10_100_only_device(struct tg3 *tp,
16183                                    const struct pci_device_id *ent)
16184 {
16185         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16186
16187         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16188              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16189             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16190                 return true;
16191
16192         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16193                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16194                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16195                                 return true;
16196                 } else {
16197                         return true;
16198                 }
16199         }
16200
16201         return false;
16202 }
16203
16204 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16205 {
16206         u32 misc_ctrl_reg;
16207         u32 pci_state_reg, grc_misc_cfg;
16208         u32 val;
16209         u16 pci_cmd;
16210         int err;
16211
16212         /* Force memory write invalidate off.  If we leave it on,
16213          * then on 5700_BX chips we have to enable a workaround.
16214          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16215          * to match the cacheline size.  The Broadcom driver has this
16216          * workaround but turns MWI off all the time, so it never uses
16217          * it.  This seems to suggest that the workaround is insufficient.
16218          */
16219         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16220         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16221         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16222
16223         /* Important! -- Make sure register accesses are byteswapped
16224          * correctly.  Also, for those chips that require it, make
16225          * sure that indirect register accesses are enabled before
16226          * the first operation.
16227          */
16228         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16229                               &misc_ctrl_reg);
16230         tp->misc_host_ctrl |= (misc_ctrl_reg &
16231                                MISC_HOST_CTRL_CHIPREV);
16232         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16233                                tp->misc_host_ctrl);
16234
16235         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16236
16237         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16238          * we need to disable memory accesses and use configuration
16239          * cycles only to access all registers. The 5702/03 chips
16240          * can mistakenly decode the special cycles from the
16241          * ICH chipsets as memory write cycles, causing corruption
16242          * of register and memory space. Only certain ICH bridges
16243          * will drive special cycles with non-zero data during the
16244          * address phase which can fall within the 5703's address
16245          * range. This is not an ICH bug as the PCI spec allows
16246          * non-zero address during special cycles. However, only
16247          * these ICH bridges are known to drive non-zero addresses
16248          * during special cycles.
16249          *
16250          * Since special cycles do not cross PCI bridges, we only
16251          * enable this workaround if the 5703 is on the secondary
16252          * bus of these ICH bridges.
16253          */
16254         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16255             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16256                 static struct tg3_dev_id {
16257                         u32     vendor;
16258                         u32     device;
16259                         u32     rev;
16260                 } ich_chipsets[] = {
16261                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16262                           PCI_ANY_ID },
16263                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16264                           PCI_ANY_ID },
16265                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16266                           0xa },
16267                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16268                           PCI_ANY_ID },
16269                         { },
16270                 };
16271                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16272                 struct pci_dev *bridge = NULL;
16273
16274                 while (pci_id->vendor != 0) {
16275                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16276                                                 bridge);
16277                         if (!bridge) {
16278                                 pci_id++;
16279                                 continue;
16280                         }
16281                         if (pci_id->rev != PCI_ANY_ID) {
16282                                 if (bridge->revision > pci_id->rev)
16283                                         continue;
16284                         }
16285                         if (bridge->subordinate &&
16286                             (bridge->subordinate->number ==
16287                              tp->pdev->bus->number)) {
16288                                 tg3_flag_set(tp, ICH_WORKAROUND);
16289                                 pci_dev_put(bridge);
16290                                 break;
16291                         }
16292                 }
16293         }
16294
16295         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16296                 static struct tg3_dev_id {
16297                         u32     vendor;
16298                         u32     device;
16299                 } bridge_chipsets[] = {
16300                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16301                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16302                         { },
16303                 };
16304                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16305                 struct pci_dev *bridge = NULL;
16306
16307                 while (pci_id->vendor != 0) {
16308                         bridge = pci_get_device(pci_id->vendor,
16309                                                 pci_id->device,
16310                                                 bridge);
16311                         if (!bridge) {
16312                                 pci_id++;
16313                                 continue;
16314                         }
16315                         if (bridge->subordinate &&
16316                             (bridge->subordinate->number <=
16317                              tp->pdev->bus->number) &&
16318                             (bridge->subordinate->busn_res.end >=
16319                              tp->pdev->bus->number)) {
16320                                 tg3_flag_set(tp, 5701_DMA_BUG);
16321                                 pci_dev_put(bridge);
16322                                 break;
16323                         }
16324                 }
16325         }
16326
16327         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16328          * DMA addresses wider than 40 bits. This bridge may have
16329          * additional 57xx devices behind it, for example in some
16330          * 4-port NIC designs. Any tg3 device found behind the bridge
16331          * will also need the 40-bit DMA workaround.
16332          */
16333         if (tg3_flag(tp, 5780_CLASS)) {
16334                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16335                 tp->msi_cap = tp->pdev->msi_cap;
16336         } else {
16337                 struct pci_dev *bridge = NULL;
16338
16339                 do {
16340                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16341                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16342                                                 bridge);
16343                         if (bridge && bridge->subordinate &&
16344                             (bridge->subordinate->number <=
16345                              tp->pdev->bus->number) &&
16346                             (bridge->subordinate->busn_res.end >=
16347                              tp->pdev->bus->number)) {
16348                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16349                                 pci_dev_put(bridge);
16350                                 break;
16351                         }
16352                 } while (bridge);
16353         }
16354
16355         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16356             tg3_asic_rev(tp) == ASIC_REV_5714)
16357                 tp->pdev_peer = tg3_find_peer(tp);
16358
16359         /* Determine TSO capabilities */
16360         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16361                 ; /* Do nothing. HW bug. */
16362         else if (tg3_flag(tp, 57765_PLUS))
16363                 tg3_flag_set(tp, HW_TSO_3);
16364         else if (tg3_flag(tp, 5755_PLUS) ||
16365                  tg3_asic_rev(tp) == ASIC_REV_5906)
16366                 tg3_flag_set(tp, HW_TSO_2);
16367         else if (tg3_flag(tp, 5750_PLUS)) {
16368                 tg3_flag_set(tp, HW_TSO_1);
16369                 tg3_flag_set(tp, TSO_BUG);
16370                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16371                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16372                         tg3_flag_clear(tp, TSO_BUG);
16373         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16374                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16375                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16376                 tg3_flag_set(tp, FW_TSO);
16377                 tg3_flag_set(tp, TSO_BUG);
16378                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16379                         tp->fw_needed = FIRMWARE_TG3TSO5;
16380                 else
16381                         tp->fw_needed = FIRMWARE_TG3TSO;
16382         }
16383
16384         /* Selectively allow TSO based on operating conditions */
16385         if (tg3_flag(tp, HW_TSO_1) ||
16386             tg3_flag(tp, HW_TSO_2) ||
16387             tg3_flag(tp, HW_TSO_3) ||
16388             tg3_flag(tp, FW_TSO)) {
16389                 /* For firmware TSO, assume ASF is disabled.
16390                  * We'll disable TSO later if we discover ASF
16391                  * is enabled in tg3_get_eeprom_hw_cfg().
16392                  */
16393                 tg3_flag_set(tp, TSO_CAPABLE);
16394         } else {
16395                 tg3_flag_clear(tp, TSO_CAPABLE);
16396                 tg3_flag_clear(tp, TSO_BUG);
16397                 tp->fw_needed = NULL;
16398         }
16399
16400         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16401                 tp->fw_needed = FIRMWARE_TG3;
16402
16403         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16404                 tp->fw_needed = FIRMWARE_TG357766;
16405
16406         tp->irq_max = 1;
16407
16408         if (tg3_flag(tp, 5750_PLUS)) {
16409                 tg3_flag_set(tp, SUPPORT_MSI);
16410                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16411                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16412                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16413                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16414                      tp->pdev_peer == tp->pdev))
16415                         tg3_flag_clear(tp, SUPPORT_MSI);
16416
16417                 if (tg3_flag(tp, 5755_PLUS) ||
16418                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16419                         tg3_flag_set(tp, 1SHOT_MSI);
16420                 }
16421
16422                 if (tg3_flag(tp, 57765_PLUS)) {
16423                         tg3_flag_set(tp, SUPPORT_MSIX);
16424                         tp->irq_max = TG3_IRQ_MAX_VECS;
16425                 }
16426         }
16427
16428         tp->txq_max = 1;
16429         tp->rxq_max = 1;
16430         if (tp->irq_max > 1) {
16431                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16432                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16433
16434                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16435                     tg3_asic_rev(tp) == ASIC_REV_5720)
16436                         tp->txq_max = tp->irq_max - 1;
16437         }
16438
16439         if (tg3_flag(tp, 5755_PLUS) ||
16440             tg3_asic_rev(tp) == ASIC_REV_5906)
16441                 tg3_flag_set(tp, SHORT_DMA_BUG);
16442
16443         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16444                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16445
16446         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16447             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16448             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16449             tg3_asic_rev(tp) == ASIC_REV_5762)
16450                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16451
16452         if (tg3_flag(tp, 57765_PLUS) &&
16453             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16454                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16455
16456         if (!tg3_flag(tp, 5705_PLUS) ||
16457             tg3_flag(tp, 5780_CLASS) ||
16458             tg3_flag(tp, USE_JUMBO_BDFLAG))
16459                 tg3_flag_set(tp, JUMBO_CAPABLE);
16460
16461         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16462                               &pci_state_reg);
16463
16464         if (pci_is_pcie(tp->pdev)) {
16465                 u16 lnkctl;
16466
16467                 tg3_flag_set(tp, PCI_EXPRESS);
16468
16469                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16470                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16471                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16472                                 tg3_flag_clear(tp, HW_TSO_2);
16473                                 tg3_flag_clear(tp, TSO_CAPABLE);
16474                         }
16475                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16476                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16477                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16478                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16479                                 tg3_flag_set(tp, CLKREQ_BUG);
16480                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16481                         tg3_flag_set(tp, L1PLLPD_EN);
16482                 }
16483         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16484                 /* BCM5785 devices are effectively PCIe devices, and should
16485                  * follow PCIe codepaths, but do not have a PCIe capabilities
16486                  * section.
16487                  */
16488                 tg3_flag_set(tp, PCI_EXPRESS);
16489         } else if (!tg3_flag(tp, 5705_PLUS) ||
16490                    tg3_flag(tp, 5780_CLASS)) {
16491                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16492                 if (!tp->pcix_cap) {
16493                         dev_err(&tp->pdev->dev,
16494                                 "Cannot find PCI-X capability, aborting\n");
16495                         return -EIO;
16496                 }
16497
16498                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16499                         tg3_flag_set(tp, PCIX_MODE);
16500         }
16501
16502         /* If we have an AMD 762 or VIA K8T800 chipset, write
16503          * reordering of mailbox register writes by the host
16504          * controller can cause major trouble.  We read back from
16505          * every mailbox register write to force the writes to be
16506          * posted to the chip in order.
16507          */
16508         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16509             !tg3_flag(tp, PCI_EXPRESS))
16510                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16511
16512         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16513                              &tp->pci_cacheline_sz);
16514         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16515                              &tp->pci_lat_timer);
16516         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16517             tp->pci_lat_timer < 64) {
16518                 tp->pci_lat_timer = 64;
16519                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16520                                       tp->pci_lat_timer);
16521         }
16522
16523         /* Important! -- It is critical that the PCI-X hw workaround
16524          * situation is decided before the first MMIO register access.
16525          */
16526         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16527                 /* 5700 BX chips need to have their TX producer index
16528                  * mailboxes written twice to work around a bug.
16529                  */
16530                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16531
16532                 /* If we are in PCI-X mode, enable register write workaround.
16533                  *
16534                  * The workaround is to use indirect register accesses
16535                  * for all chip writes not to mailbox registers.
16536                  * for all chip writes except those to mailbox registers.
16537                 if (tg3_flag(tp, PCIX_MODE)) {
16538                         u32 pm_reg;
16539
16540                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16541
16542                         /* The chip can have its power management PCI config
16543                          * space registers clobbered due to this bug.
16544                          * So explicitly force the chip into D0 here.
16545                          */
16546                         pci_read_config_dword(tp->pdev,
16547                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16548                                               &pm_reg);
16549                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16550                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16551                         pci_write_config_dword(tp->pdev,
16552                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16553                                                pm_reg);
16554
16555                         /* Also, force SERR#/PERR# in PCI command. */
16556                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16557                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16558                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16559                 }
16560         }
16561
16562         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16563                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16564         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16565                 tg3_flag_set(tp, PCI_32BIT);
16566
16567         /* Chip-specific fixup from Broadcom driver */
16568         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16569             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16570                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16571                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16572         }
16573
16574         /* Default fast path register access methods */
16575         tp->read32 = tg3_read32;
16576         tp->write32 = tg3_write32;
16577         tp->read32_mbox = tg3_read32;
16578         tp->write32_mbox = tg3_write32;
16579         tp->write32_tx_mbox = tg3_write32;
16580         tp->write32_rx_mbox = tg3_write32;
16581
16582         /* Various workaround register access methods */
16583         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16584                 tp->write32 = tg3_write_indirect_reg32;
16585         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16586                  (tg3_flag(tp, PCI_EXPRESS) &&
16587                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16588                 /*
16589                  * Back-to-back register writes can cause problems on these
16590                  * chips; the workaround is to read back all reg writes
16591                  * except those to mailbox regs.
16592                  *
16593                  * See tg3_write_indirect_reg32().
16594                  */
16595                 tp->write32 = tg3_write_flush_reg32;
16596         }
16597
16598         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16599                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16600                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16601                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16602         }
16603
16604         if (tg3_flag(tp, ICH_WORKAROUND)) {
16605                 tp->read32 = tg3_read_indirect_reg32;
16606                 tp->write32 = tg3_write_indirect_reg32;
16607                 tp->read32_mbox = tg3_read_indirect_mbox;
16608                 tp->write32_mbox = tg3_write_indirect_mbox;
16609                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16610                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16611
16612                 iounmap(tp->regs);
16613                 tp->regs = NULL;
16614
16615                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16616                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16617                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16618         }
16619         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16620                 tp->read32_mbox = tg3_read32_mbox_5906;
16621                 tp->write32_mbox = tg3_write32_mbox_5906;
16622                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16623                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16624         }

        if (tp->write32 == tg3_write_indirect_reg32 ||
            (tg3_flag(tp, PCIX_MODE) &&
             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
              tg3_asic_rev(tp) == ASIC_REV_5701)))
                tg3_flag_set(tp, SRAM_USE_CONFIG);

        /* The memory arbiter has to be enabled in order for SRAM accesses
         * to succeed.  Normally on powerup the tg3 chip firmware will make
         * sure it is enabled, but other entities such as system netboot
         * code might disable it.
         */
        val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
        if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tg3_flag(tp, PCIX_MODE)) {
                        pci_read_config_dword(tp->pdev,
                                              tp->pcix_cap + PCI_X_STATUS,
                                              &val);
                        tp->pci_fn = val & 0x7;
                }
        } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
                   tg3_asic_rev(tp) == ASIC_REV_5719 ||
                   tg3_asic_rev(tp) == ASIC_REV_5720) {
                tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
                if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
                        val = tr32(TG3_CPMU_STATUS);

                if (tg3_asic_rev(tp) == ASIC_REV_5717)
                        tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
                else
                        tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
                                     TG3_CPMU_STATUS_FSHFT_5719;
        }

        if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
                tp->write32_tx_mbox = tg3_write_flush_reg32;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
        }

        /* Get eeprom hw config before calling tg3_set_power_state().
         * In particular, the TG3_FLAG_IS_NIC flag must be
         * determined before calling tg3_set_power_state() so that
         * we know whether or not to switch out of Vaux power.
         * When the flag is set, it means that GPIO1 is used for eeprom
         * write protect and also implies that it is a LOM where GPIOs
         * are not used to switch power.
         */
        tg3_get_eeprom_hw_cfg(tp);

        if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
                tg3_flag_clear(tp, TSO_CAPABLE);
                tg3_flag_clear(tp, TSO_BUG);
                tp->fw_needed = NULL;
        }

        if (tg3_flag(tp, ENABLE_APE)) {
                /* Allow reads and writes to the
                 * APE register and memory space.
                 */
                pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                                 PCISTATE_ALLOW_APE_SHMEM_WR |
                                 PCISTATE_ALLOW_APE_PSPACE_WR;
                pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
                                       pci_state_reg);

                tg3_ape_lock_init(tp);
                tp->ape_hb_interval =
                        msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
        }

        /* Set up tp->grc_local_ctrl before calling
         * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
         * will bring 5700's external PHY out of reset.
         * It is also used as eeprom write protect on LOMs.
         */
        tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_flag(tp, EEPROM_WRITE_PROT))
                tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
                                       GRC_LCLCTRL_GPIO_OUTPUT1);
        /* Unused GPIO3 must be driven as output on 5752 because there
         * are no pull-up resistors on unused GPIO pins.
         */
        else if (tg3_asic_rev(tp) == ASIC_REV_5752)
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

        if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
            tg3_asic_rev(tp) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_CLASS))
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                /* Turn off the debug UART. */
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
                if (tg3_flag(tp, IS_NIC))
                        /* Keep VMain power. */
                        tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                              GRC_LCLCTRL_GPIO_OUTPUT0;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5762)
                tp->grc_local_ctrl |=
                        tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

        /* Switch out of Vaux if it is a NIC */
        tg3_pwrsrc_switch_to_vmain(tp);

        /* Derive initial jumbo mode from MTU assigned in
         * ether_setup() via the alloc_etherdev() call
         */
        if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
                tg3_flag_set(tp, JUMBO_RING_ENABLE);

        /* Determine WakeOnLan speed to use. */
        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
            tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
            tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
                tg3_flag_clear(tp, WOL_SPEED_100MB);
        } else {
                tg3_flag_set(tp, WOL_SPEED_100MB);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906)
                tp->phy_flags |= TG3_PHYFLG_IS_FET;

        /* A few boards don't want Ethernet@WireSpeed phy feature */
        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            (tg3_asic_rev(tp) == ASIC_REV_5705 &&
             (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
             (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
            (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

        if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
            tg3_chip_rev(tp) == CHIPREV_5704_AX)
                tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
                tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

        if (tg3_flag(tp, 5705_PLUS) &&
            !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            tg3_asic_rev(tp) != ASIC_REV_5785 &&
            tg3_asic_rev(tp) != ASIC_REV_57780 &&
            !tg3_flag(tp, 57765_PLUS)) {
                if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
                    tg3_asic_rev(tp) == ASIC_REV_5787 ||
                    tg3_asic_rev(tp) == ASIC_REV_5784 ||
                    tg3_asic_rev(tp) == ASIC_REV_5761) {
                        if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
                            tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
                                tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
                        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
                                tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
                } else
                        tp->phy_flags |= TG3_PHYFLG_BER_BUG;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
            tg3_chip_rev(tp) != CHIPREV_5784_AX) {
                tp->phy_otp = tg3_read_otp_phycfg(tp);
                if (tp->phy_otp == 0)
                        tp->phy_otp = TG3_OTP_DEFAULT;
        }

        if (tg3_flag(tp, CPMU_PRESENT))
                tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
        else
                tp->mi_mode = MAC_MI_MODE_BASE;

        tp->coalesce_mode = 0;
        if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
            tg3_chip_rev(tp) != CHIPREV_5700_BX)
                tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

        /* Set these bits to enable statistics workaround. */
        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_asic_rev(tp) == ASIC_REV_5762 ||
            tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
            tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
                tp->coalesce_mode |= HOSTCC_MODE_ATTN;
                tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
            tg3_asic_rev(tp) == ASIC_REV_57780)
                tg3_flag_set(tp, USE_PHYLIB);

        err = tg3_mdio_init(tp);
        if (err)
                return err;

        /* Initialize data/descriptor byte/word swapping. */
        val = tr32(GRC_MODE);
        if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
                        GRC_MODE_WORD_SWAP_B2HRX_DATA |
                        GRC_MODE_B2HRX_ENABLE |
                        GRC_MODE_HTX2B_ENABLE |
                        GRC_MODE_HOST_STACKUP);
        else
                val &= GRC_MODE_HOST_STACKUP;

        tw32(GRC_MODE, val | tp->grc_mode);

        tg3_switch_clocks(tp);

        /* Clear this out for sanity. */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
        tw32(TG3PCI_REG_BASE_ADDR, 0);

        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
                              &pci_state_reg);
        if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
            !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
                if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
                        void __iomem *sram_base;

                        /* Write some dummy words into the SRAM status block
                         * area and see if they read back correctly.  If the
                         * readback is bad, force-enable the PCIX workaround.
                         */
                        sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

                        writel(0x00000000, sram_base);
                        writel(0x00000000, sram_base + 4);
                        writel(0xffffffff, sram_base + 4);
                        if (readl(sram_base) != 0x00000000)
                                tg3_flag_set(tp, PCIX_TARGET_HWBUG);
                }
        }
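
        /*
         * Note on the probe above (an interpretation, not from the
         * original comments): if back-to-back writes are mishandled,
         * the final write of 0xffffffff intended for sram_base + 4 can
         * corrupt the word at sram_base, so a non-zero readback of the
         * first word is taken as evidence of the PCI-X target bug.
         */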

        udelay(50);
        tg3_nvram_init(tp);

        /* If the device has an NVRAM, no need to load patch firmware */
        if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
            !tg3_flag(tp, NO_NVRAM))
                tp->fw_needed = NULL;

        grc_misc_cfg = tr32(GRC_MISC_CFG);
        grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

        if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
            (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
                tg3_flag_set(tp, IS_5788);

        if (!tg3_flag(tp, IS_5788) &&
            tg3_asic_rev(tp) != ASIC_REV_5700)
                tg3_flag_set(tp, TAGGED_STATUS);
        if (tg3_flag(tp, TAGGED_STATUS)) {
                tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
                                      HOSTCC_MODE_CLRTICK_TXBD);

                tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                                       tp->misc_host_ctrl);
        }

        /* Preserve the APE MAC_MODE bits */
        if (tg3_flag(tp, ENABLE_APE))
                tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
        else
                tp->mac_mode = 0;

        if (tg3_10_100_only_device(tp, ent))
                tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

        err = tg3_phy_probe(tp);
        if (err) {
                dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
                /* ... but do not return immediately ... */
                tg3_mdio_fini(tp);
        }

        tg3_read_vpd(tp);
        tg3_read_fw_ver(tp);

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
        } else {
                if (tg3_asic_rev(tp) == ASIC_REV_5700)
                        tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
                else
                        tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
        }

        /* 5700 {AX,BX} chips have a broken status block link
         * change bit implementation, so we must use the
         * status register in those cases.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_5700)
                tg3_flag_set(tp, USE_LINKCHG_REG);
        else
                tg3_flag_clear(tp, USE_LINKCHG_REG);

        /* The led_ctrl is set during tg3_phy_probe; here we might
         * have to force the link status polling mechanism based
         * upon subsystem IDs.
         */
        if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
            tg3_asic_rev(tp) == ASIC_REV_5701 &&
            !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
                tg3_flag_set(tp, USE_LINKCHG_REG);
        }

        /* For all SERDES we poll the MAC status register. */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                tg3_flag_set(tp, POLL_SERDES);
        else
                tg3_flag_clear(tp, POLL_SERDES);

        if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
                tg3_flag_set(tp, POLL_CPMU_LINK);

        tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
        tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
        if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
            tg3_flag(tp, PCIX_MODE)) {
                tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                tp->rx_copy_thresh = ~(u16)0;
#endif
        }

        tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
        tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
        tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

        tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

        /* Increment the rx prod index on the rx std ring by at most
         * 8 for these chips to work around hw errata.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
            tg3_asic_rev(tp) == ASIC_REV_5752 ||
            tg3_asic_rev(tp) == ASIC_REV_5755)
                tp->rx_std_max_post = 8;

        if (tg3_flag(tp, ASPM_WORKAROUND))
                tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
                                     PCIE_PWR_MGMT_L1_THRESH_MSK;

        return err;
}

static int tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;
        int err;

        if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
                return 0;

        if (tg3_flag(tp, IS_SSB_CORE)) {
                err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
                if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
                        return 0;
        }

        mac_offset = 0x7c;
        if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        } else if (tg3_flag(tp, 5717_PLUS)) {
                if (tp->pci_fn & 1)
                        mac_offset = 0xcc;
                if (tp->pci_fn > 1)
                        mac_offset += 0x18c;
        } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        if ((hi >> 16) == 0x484b) {
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
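
        /* Worked example for the mailbox decode above (hypothetical
         * values): the signature 0x484b is ASCII "HK" in the top 16
         * bits of the high word.  With hi = 0x484b0a1b and
         * lo = 0x2c3d4e5f, the bytes unpack to the MAC address
         * 0a:1b:2c:3d:4e:5f -- big-endian within each word, high word
         * first.
         */
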
        if (!addr_ok) {
                /* Next, try NVRAM. */
                if (!tg3_flag(tp, NO_NVRAM) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
                        memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
                        memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0]))
                return -EINVAL;
        return 0;
}

#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
        int cacheline_size;
        u8 byte;
        int goal;

        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
        if (byte == 0)
                cacheline_size = 1024;
        else
                cacheline_size = (int) byte * 4;

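        /* The PCI cache line size register counts 32-bit words, hence
         * the multiply by 4 to get bytes: e.g. a register value of 16
         * means a 64-byte cache line.  A value of 0 (unset or
         * unimplemented) is treated as the 1024-byte maximum.
         */
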
        /* On 5703 and later chips, the boundary bits have no
         * effect.
         */
        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701 &&
            !tg3_flag(tp, PCI_EXPRESS))
                goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
        goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
        goal = BOUNDARY_SINGLE_CACHELINE;
#else
        goal = 0;
#endif
#endif

        if (tg3_flag(tp, 57765_PLUS)) {
                val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                goto out;
        }

        if (!goal)
                goto out;

        /* PCI controllers on most RISC systems tend to disconnect
         * when a device tries to burst across a cache-line boundary.
         * Therefore, letting tg3 do so just wastes PCI bandwidth.
         *
         * Unfortunately, for PCI-E there are only limited
         * write-side controls for this, and thus for reads
         * we will still get the disconnects.  We'll also waste
         * these PCI cycles for both read and write for chips
         * other than 5700 and 5701 which do not implement the
         * boundary bits.
         */
        if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
                switch (cacheline_size) {
                case 16:
                case 32:
                case 64:
                case 128:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
                                        DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
                        } else {
                                val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
                                        DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
                        }
                        break;

                case 256:
                        val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
                                DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
                        break;

                default:
                        val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
                                DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
                        break;
                }
        } else if (tg3_flag(tp, PCI_EXPRESS)) {
                switch (cacheline_size) {
                case 16:
                case 32:
                case 64:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
                                val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
                                break;
                        }
                        /* fallthrough */
                case 128:
                default:
                        val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
                        val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
                        break;
                }
        } else {
                switch (cacheline_size) {
                case 16:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_16 |
                                        DMA_RWCTRL_WRITE_BNDRY_16);
                                break;
                        }
                        /* fallthrough */
                case 32:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_32 |
                                        DMA_RWCTRL_WRITE_BNDRY_32);
                                break;
                        }
                        /* fallthrough */
                case 64:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_64 |
                                        DMA_RWCTRL_WRITE_BNDRY_64);
                                break;
                        }
                        /* fallthrough */
                case 128:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_128 |
                                        DMA_RWCTRL_WRITE_BNDRY_128);
                                break;
                        }
                        /* fallthrough */
                case 256:
                        val |= (DMA_RWCTRL_READ_BNDRY_256 |
                                DMA_RWCTRL_WRITE_BNDRY_256);
                        break;
                case 512:
                        val |= (DMA_RWCTRL_READ_BNDRY_512 |
                                DMA_RWCTRL_WRITE_BNDRY_512);
                        break;
                case 1024:
                default:
                        val |= (DMA_RWCTRL_READ_BNDRY_1024 |
                                DMA_RWCTRL_WRITE_BNDRY_1024);
                        break;
                }
        }

out:
        return val;
}

static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
                           int size, bool to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        if (to_device)
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        else
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
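
/*
 * The descriptor upload loop in tg3_do_test_dma() drives the chip's
 * SRAM through the PCI config-space memory window: point
 * TG3PCI_MEM_WIN_BASE_ADDR at an SRAM offset, then move data through
 * TG3PCI_MEM_WIN_DATA.  A hypothetical helper (example_sram_write32 is
 * not part of this driver) capturing the idiom:
 *
 *      static void example_sram_write32(struct tg3 *tp, u32 off, u32 val)
 *      {
 *              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 *              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 *              pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
 *      }
 */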

#define TEST_BUFFER_SIZE        0x2000

static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
        { },
};

static int tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret = 0;

        buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
                                 &buf_dma, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        if (tg3_flag(tp, 57765_PLUS))
                goto out;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
                    tg3_asic_rev(tp) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
                    tg3_asic_rev(tp) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if (tg3_flag(tp, 40BIT_DMA_BUG) &&
                            tg3_asic_rev(tp) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (tg3_asic_rev(tp) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }
        if (tg3_flag(tp, ONE_DMA_AT_ONCE))
                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);


        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701)
                goto out;

        /* It is best to perform the DMA test with the maximum write burst
         * size, to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        while (1) {
                u32 *p = buf, i;

                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
                if (ret) {
                        dev_err(&tp->pdev->dev,
                                "%s: Buffer write failed. err = %d\n",
                                __func__, ret);
                        break;
                }

                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
                if (ret) {
                        dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
                                "err = %d\n", __func__, ret);
                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on read back! "
                                        "(%d != %d)\n", __func__, p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                /* The DMA test passed without adjusting the DMA boundary;
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                } else {
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;
                }

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
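
/*
 * Strategy summary for tg3_test_dma(): run the loopback test with the
 * write boundary wide open to provoke the 5700/5701 write-DMA bug; on
 * any miscompare, clamp writes to a 16-byte boundary and retry.  Hosts
 * matched by tg3_dma_wait_state_chipsets (the Apple UniNorth bridge)
 * are known to need the clamp even though they pass the test, so they
 * get it unconditionally.
 */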

static void tg3_init_bufmgr_config(struct tg3 *tp)
{
        if (tg3_flag(tp, 57765_PLUS)) {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER_57765;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER_57765;

                tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO_57765;
        } else if (tg3_flag(tp, 5705_PLUS)) {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER_5705;
                if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                        tp->bufmgr_config.mbuf_mac_rx_low_water =
                                DEFAULT_MB_MACRX_LOW_WATER_5906;
                        tp->bufmgr_config.mbuf_high_water =
                                DEFAULT_MB_HIGH_WATER_5906;
                }

                tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
                        DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
                tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO_5780;
        } else {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER;

                tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
                        DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
                tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO;
        }

        tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
        tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char *tg3_phy_string(struct tg3 *tp)
{
        switch (tp->phy_id & TG3_PHY_ID_MASK) {
        case TG3_PHY_ID_BCM5400:        return "5400";
        case TG3_PHY_ID_BCM5401:        return "5401";
        case TG3_PHY_ID_BCM5411:        return "5411";
        case TG3_PHY_ID_BCM5701:        return "5701";
        case TG3_PHY_ID_BCM5703:        return "5703";
        case TG3_PHY_ID_BCM5704:        return "5704";
        case TG3_PHY_ID_BCM5705:        return "5705";
        case TG3_PHY_ID_BCM5750:        return "5750";
        case TG3_PHY_ID_BCM5752:        return "5752";
        case TG3_PHY_ID_BCM5714:        return "5714";
        case TG3_PHY_ID_BCM5780:        return "5780";
        case TG3_PHY_ID_BCM5755:        return "5755";
        case TG3_PHY_ID_BCM5787:        return "5787";
        case TG3_PHY_ID_BCM5784:        return "5784";
        case TG3_PHY_ID_BCM5756:        return "5722/5756";
        case TG3_PHY_ID_BCM5906:        return "5906";
        case TG3_PHY_ID_BCM5761:        return "5761";
        case TG3_PHY_ID_BCM5718C:       return "5718C";
        case TG3_PHY_ID_BCM5718S:       return "5718S";
        case TG3_PHY_ID_BCM57765:       return "57765";
        case TG3_PHY_ID_BCM5719C:       return "5719C";
        case TG3_PHY_ID_BCM5720C:       return "5720C";
        case TG3_PHY_ID_BCM5762:        return "5762C";
        case TG3_PHY_ID_BCM8002:        return "8002/serdes";
        case 0:                 return "serdes";
        default:                return "unknown";
        }
}

static char *tg3_bus_string(struct tg3 *tp, char *str)
{
        if (tg3_flag(tp, PCI_EXPRESS)) {
                strcpy(str, "PCI Express");
                return str;
        } else if (tg3_flag(tp, PCIX_MODE)) {
                u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

                strcpy(str, "PCIX:");

                if ((clock_ctrl == 7) ||
                    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
                     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
                        strcat(str, "133MHz");
                else if (clock_ctrl == 0)
                        strcat(str, "33MHz");
                else if (clock_ctrl == 2)
                        strcat(str, "50MHz");
                else if (clock_ctrl == 4)
                        strcat(str, "66MHz");
                else if (clock_ctrl == 6)
                        strcat(str, "100MHz");
        } else {
                strcpy(str, "PCI:");
                if (tg3_flag(tp, PCI_HIGH_SPEED))
                        strcat(str, "66MHz");
                else
                        strcat(str, "33MHz");
        }
        if (tg3_flag(tp, PCI_32BIT))
                strcat(str, ":32-bit");
        else
                strcat(str, ":64-bit");
        return str;
}

static void tg3_init_coal(struct tg3 *tp)
{
        struct ethtool_coalesce *ec = &tp->coal;

        memset(ec, 0, sizeof(*ec));
        ec->cmd = ETHTOOL_GCOALESCE;
        ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
        ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
        ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
        ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
        ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
        ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
        ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
        ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
        ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

        if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
                                 HOSTCC_MODE_CLRTICK_TXBD)) {
                ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
                ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
                ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
                ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
        }

        if (tg3_flag(tp, 5705_PLUS)) {
                ec->rx_coalesce_usecs_irq = 0;
                ec->tx_coalesce_usecs_irq = 0;
                ec->stats_block_coalesce_usecs = 0;
        }
}
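
/*
 * These defaults are what ethtool reports before any user tuning, e.g.
 * "ethtool -c ethX" shows rx-usecs/tx-usecs taken from this structure.
 * The per-IRQ and statistics knobs are zeroed on 5705+ parts above,
 * presumably because that hardware does not implement them.
 */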

static int tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct tg3 *tp;
        int i, err;
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
        netdev_features_t features = 0;

        printk_once(KERN_INFO "%s\n", version);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
        if (!dev) {
                err = -ENOMEM;
                goto err_out_free_res;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
        tp->irq_sync = 1;
        tp->pcierr_recovery = false;

        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        if (pdev_is_ssb_gige_core(pdev)) {
                tg3_flag_set(tp, IS_SSB_CORE);
                if (ssb_gige_must_flush_posted_writes(pdev))
                        tg3_flag_set(tp, FLUSH_POSTED_WRITES);
                if (ssb_gige_one_dma_at_once(pdev))
                        tg3_flag_set(tp, ONE_DMA_AT_ONCE);
                if (ssb_gige_have_roboswitch(pdev)) {
                        tg3_flag_set(tp, USE_PHYLIB);
                        tg3_flag_set(tp, ROBOSWITCH);
                }
                if (ssb_gige_is_rgmii(pdev))
                        tg3_flag_set(tp, RGMII_MODE);
        }

        /* The word/byte swap controls here control register access byte
         * swapping.  DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task);

        tp->regs = pci_ioremap_bar(pdev, BAR_0);
        if (!tp->regs) {
                dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
                tg3_flag_set(tp, ENABLE_APE);
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
                        dev_err(&pdev->dev,
                                "Cannot map APE registers, aborting\n");
                        err = -ENOMEM;
                        goto err_out_iounmap;
                }
        }

        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->netdev_ops = &tg3_netdev_ops;
        dev->irq = pdev->irq;

        err = tg3_get_invariants(tp, ent);
        if (err) {
                dev_err(&pdev->dev,
                        "Problem fetching invariants of chip, aborting\n");
                goto err_out_apeunmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
        if (tg3_flag(tp, IS_5788))
                persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
        else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_BIT_MASK(64);
#endif
        } else
                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

        /* Configure DMA attributes. */
        if (dma_mask > DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, dma_mask);
                if (!err) {
                        features |= NETIF_F_HIGHDMA;
                        err = pci_set_consistent_dma_mask(pdev,
                                                          persist_dma_mask);
                        if (err < 0) {
                                dev_err(&pdev->dev, "Unable to obtain 64 bit "
                                        "DMA for consistent allocations\n");
                                goto err_out_apeunmap;
                        }
                }
        }
        if (err || dma_mask == DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
                        goto err_out_apeunmap;
                }
        }
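
        /* Historical note: pci_set_dma_mask()/pci_set_consistent_dma_mask()
         * are the older PCI wrappers; on kernels where they have been
         * retired, the equivalent is dma_set_mask_and_coherent(&pdev->dev,
         * mask), or separate dma_set_mask()/dma_set_coherent_mask() calls
         * when the streaming and coherent masks differ, as they can here.
         */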

        tg3_init_bufmgr_config(tp);

        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
                features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (tg3_flag(tp, 5755_PLUS))
                        features |= NETIF_F_IPV6_CSUM;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if ((tg3_flag(tp, HW_TSO_1) ||
             tg3_flag(tp, HW_TSO_2) ||
             tg3_flag(tp, HW_TSO_3)) &&
            (features & NETIF_F_IP_CSUM))
                features |= NETIF_F_TSO;
        if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
                if (features & NETIF_F_IPV6_CSUM)
                        features |= NETIF_F_TSO6;
                if (tg3_flag(tp, HW_TSO_3) ||
                    tg3_asic_rev(tp) == ASIC_REV_5761 ||
                    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
                     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
                    tg3_asic_rev(tp) == ASIC_REV_5785 ||
                    tg3_asic_rev(tp) == ASIC_REV_57780)
                        features |= NETIF_F_TSO_ECN;
        }

        dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
                         NETIF_F_HW_VLAN_CTAG_RX;
        dev->vlan_features |= features;

        /*
         * Add loopback capability only for a subset of devices that support
         * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
         * INT-PHY loopback for the remaining devices.
         */
        if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT))
                /* Add the loopback capability */
                features |= NETIF_F_LOOPBACK;

        dev->hw_features |= features;
        dev->priv_flags |= IFF_UNICAST_FLT;

        /* MTU range: 60 - 9000 or 1500, depending on hardware */
        dev->min_mtu = TG3_MIN_MTU;
        dev->max_mtu = TG3_MAX_MTU(tp);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
            !tg3_flag(tp, TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tg3_flag_set(tp, MAX_RXPEND_64);
                tp->rx_pending = 63;
        }

        err = tg3_get_device_address(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Could not obtain valid ethernet address, aborting\n");
                goto err_out_apeunmap;
        }

        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->tp = tp;
                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

                tnapi->int_mbox = intmbx;
                if (i <= 4)
                        intmbx += 0x8;
                else
                        intmbx += 0x4;

                tnapi->consmbox = rcvmbx;
                tnapi->prodmbox = sndmbx;

                if (i)
                        tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;

                if (!tg3_flag(tp, SUPPORT_MSIX))
                        break;

                /*
                 * If we support MSI-X, we'll be using RSS.  With RSS,
                 * the first vector only handles link interrupts and
                 * the remaining vectors handle rx and tx interrupts.
                 * Reuse the mailbox values for the next iteration.
                 * The values we set up above are still useful for
                 * single-vector mode.
                 */
                if (!i)
                        continue;

                rcvmbx += 0x8;

                if (sndmbx & 0x4)
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }
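        /* Worked example of the arithmetic above (a sketch; assumes
         * TG3_64BIT_REG_LOW == 0x4, i.e. the low word of an 8-byte
         * mailbox): vectors 0 and 1 share the first rx/tx mailboxes,
         * then rcvmbx advances one 8-byte mailbox (+0x8) per vector,
         * while sndmbx walks base+0x4, base+0x0, base+0xc, base+0x8,
         * base+0x14, ... that is, both 4-byte halves of each
         * successive 8-byte mailbox, low word first.
         */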

        /*
         * Reset the chip in case the UNDI or EFI driver did not shut
         * it down.  The DMA self test below will enable the WDMAC and
         * we would then see (spurious) pending DMA on the PCI bus at
         * that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tg3_full_lock(tp, 0);
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

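        /* Only these ASICs have the time-sync (EAV) hardware needed
         * for a PTP clock; tg3_ptp_init() below is gated on this flag.
         */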
        if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720 ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, PTP_CAPABLE);

        tg3_timer_init(tp);

        tg3_carrier_off(tp);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

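        /* PTP clock registration is best-effort: on failure, fall
         * back to NULL and keep the NIC usable without a hardware
         * clock.
         */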
        if (tg3_flag(tp, PTP_CAPABLE)) {
                tg3_ptp_init(tp);
                tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
                                                   &tp->pdev->dev);
                if (IS_ERR(tp->ptp_clock))
                        tp->ptp_clock = NULL;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

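        /* Snapshot the config space now so that tg3_io_slot_reset()
         * can restore it after an AER-triggered bus reset.
         */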
        pci_save_state(pdev);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
        return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                tg3_ptp_fini(tp);

                release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
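        /* If preparing for power-down failed, undo the shutdown above
         * and bring the interface back up so the device stays usable.
         */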
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, true);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

unlock:
        rtnl_unlock();
        return err;
}

static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        tg3_flag_set(tp, INIT_COMPLETE);
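        /* Ask tg3_restart_hw() to also reset the PHY unless the link
         * was deliberately kept up across the power-down
         * (TG3_PHYFLG_KEEP_LINK_ON_PWRDN, presumably for wake-on-LAN
         * style setups).
         */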
        err = tg3_restart_hw(tp,
                             !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

unlock:
        rtnl_unlock();
        return err;
}
#endif /* CONFIG_PM_SLEEP */

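/* Wrap the suspend/resume callbacks above into a struct dev_pm_ops
 * for system sleep; when CONFIG_PM_SLEEP is unset the macro leaves
 * the ops table empty, matching the #ifdef above.
 */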
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);

        rtnl_lock();
        netif_device_detach(dev);

        if (netif_running(dev))
                dev_close(dev);

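        /* Power the NIC down only for a real power-off; on a reboot
         * the device is left as-is, presumably so that boot firmware
         * finds it in a sane state.
         */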
        if (system_state == SYSTEM_POWER_OFF)
                tg3_power_down(tp);

        rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        /* netdev may not exist yet, e.g. if the error hits during probe */
        if (!netdev || !netif_running(netdev))
                goto done;

        /* Only begin recovery for a frozen (non-permanent) error; a
         * permanent failure is handled at the "done" label below.
         */
        if (state == pci_channel_io_frozen)
                tp->pcierr_recovery = true;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Want to make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
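        /* A permanent failure means the device will not come back:
         * close it and report DISCONNECT.  Otherwise disable the
         * device and report NEED_RESET so the AER core invokes
         * tg3_io_slot_reset() next.
         */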
        if (state == pci_channel_io_perm_failure) {
                if (netdev) {
                        tg3_napi_enable(tp);
                        dev_close(netdev);
                }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
        }

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

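        /* Re-enable bus mastering, restore the config space saved by
         * pci_save_state() at probe time, then save it again so a
         * later recovery cycle starts from a clean snapshot.
         */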
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netdev || !netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        tg3_phy_start(tp);

done:
        tp->pcierr_recovery = false;
        rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
        .shutdown       = tg3_shutdown,
};

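/* module_pci_driver() generates the module_init()/module_exit()
 * boilerplate that registers and unregisters tg3_driver with the
 * PCI core on module load/unload.
 */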
module_pci_driver(tg3_driver);