]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/net/ethernet/broadcom/tg3.c
drivers: Remove explicit invocations of mmiowb()
[linux.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *      Derived from proprietary unpublished source code,
14  *      Copyright (C) 2000-2016 Broadcom Corporation.
15  *      Copyright (C) 2016-2017 Broadcom Ltd.
16  *      Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *      refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *      Permission is hereby granted for the distribution of this firmware
20  *      data in hexadecimal or equivalent format, provided this copyright
21  *      notice is accompanying it.
22  */
23
24
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58
59 #include <net/checksum.h>
60 #include <net/ip.h>
61
62 #include <linux/io.h>
63 #include <asm/byteorder.h>
64 #include <linux/uaccess.h>
65
66 #include <uapi/linux/net_tstamp.h>
67 #include <linux/ptp_clock_kernel.h>
68
69 #define BAR_0   0
70 #define BAR_2   2
71
72 #include "tg3.h"
73
/* Functions & macros to verify TG3_FLAGS types */

/* Return non-zero if @flag is set in the @bits bitmap. */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

/* Set @flag in the @bits bitmap. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

/* Clear @flag in the @bits bitmap. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

/* Funnel all flag accesses through the enum-typed helpers above so the
 * compiler rejects misspelled flag names at the call site.
 */
#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
97
/* Driver identity/version strings. */
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

/* "Kind" codes describing why the chip is being reset. */
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

/* Default RX/TX mode register values and default netif message mask. */
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* Delay (usec) after toggling the GPIO power switch via GRC_LOCAL_CTRL. */
#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
133
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts. */
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* TX index increment with power-of-two wraparound (see comment above). */
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

/* RX buffer DMA sizes for standard and jumbo rings. */
#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

/* Map size = buffer size padded by the DMA byte-enable granularity. */
#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

/* Byte sizes of the driver-side ring_info shadow arrays. */
#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

/* Unicast MAC address slots available/starting index; the ASF firmware
 * reserves slots when enabled.
 */
#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

/* Firmware image names loaded via request_firmware(). */
#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
224
225 static char version[] =
226         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
227
228 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
229 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
230 MODULE_LICENSE("GPL");
231 MODULE_VERSION(DRV_MODULE_VERSION);
232 MODULE_FIRMWARE(FIRMWARE_TG3);
233 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
234 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
235
236 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
237 module_param(tg3_debug, int, 0);
238 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
239
/* Per-device quirk bits carried in pci_device_id.driver_data. */
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

/* PCI IDs this driver binds to.  Entries with .driver_data mark
 * 10/100-only parts and/or 5705-class 10/100 variants.
 */
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	/* 5787M in Lenovo systems is a 10/100-only part. */
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	/* Acer 57780 variants are 10/100-only. */
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}	/* sentinel */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
363
/* Names reported for `ethtool -S`.  NOTE(review): the order here
 * presumably matches the order the statistic values are gathered
 * elsewhere in this driver — keep the two in sync when editing.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
446
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
/* Indices into ethtool_test_keys[] and the self-test result array. */
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
456
457
/* Names for the ethtool self-tests, indexed by the TG3_*_TEST ids. */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
472
473
/* Direct MMIO write of @val to device register @off. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
478
/* Direct MMIO read of device register @off. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
483
/* MMIO write to the APE (management processor) register block. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
488
/* MMIO read from the APE (management processor) register block. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
493
/* Indirect register write via PCI config space: point the
 * REG_BASE_ADDR window at @off, then write @val through REG_DATA.
 * indirect_lock serializes the two-step window sequence.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
503
/* MMIO write followed by a read back of the same register to flush the
 * posted PCI write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
509
/* Indirect register read via the REG_BASE_ADDR/REG_DATA config-space
 * window; counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
521
/* Indirect-mode mailbox write.  Two mailboxes (RX return ring 0
 * consumer index and the std RX producer index) have dedicated
 * config-space aliases and are written directly; all other mailboxes
 * go through the REG_BASE/REG_DATA window at the +0x5600 mailbox
 * region, serialized by indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
551
/* Indirect-mode mailbox read through the REG_BASE/REG_DATA window
 * (mailbox region at +0x5600); counterpart of tg3_write_indirect_mbox().
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
563
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 is an indirect
		 * (config-space) routine here, so no flush read is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: write, optionally delay, then read back
		 * to force the posted write out to the chip.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
587
/* Mailbox write with flush: read the mailbox back when the chip requires
 * posted writes to be flushed, or when reading back is known safe (no
 * mailbox write reordering and no ICH workaround in effect).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
596
/* TX mailbox write.  Chips with the TXD_MBOX_HWBUG erratum need the
 * value written twice; chips that may reorder mailbox writes (or that
 * require posted-write flushes) need a read back to push the write out.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
607
/* 5906 mailbox read: mailboxes live in the GRC mailbox region. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
612
/* 5906 mailbox write: mailboxes live in the GRC mailbox region. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
617
/* Register/mailbox accessor shorthands.  These dispatch through the
 * function pointers installed in struct tg3, so the same call sites
 * work for direct, indirect, and flushed access methods.  The _f
 * variants flush the write; tw32_wait_f also waits @us microseconds.
 */
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
628
/* Write @val into NIC SRAM at @off through the memory window, either
 * via PCI config space (SRAM_USE_CONFIG) or via the MMIO window
 * registers.  On the 5906, writes targeting the statistics block range
 * are silently skipped.  The window base is always restored to zero.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
653
/* Read a NIC SRAM word at @off into *@val through the memory window;
 * counterpart of tg3_write_mem().  On the 5906, reads from the
 * statistics block range return 0 without touching the hardware.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
680
681 static void tg3_ape_lock_init(struct tg3 *tp)
682 {
683         int i;
684         u32 regbase, bit;
685
686         if (tg3_asic_rev(tp) == ASIC_REV_5761)
687                 regbase = TG3_APE_LOCK_GRANT;
688         else
689                 regbase = TG3_APE_PER_LOCK_GRANT;
690
691         /* Make sure the driver hasn't any stale locks. */
692         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
693                 switch (i) {
694                 case TG3_APE_LOCK_PHY0:
695                 case TG3_APE_LOCK_PHY1:
696                 case TG3_APE_LOCK_PHY2:
697                 case TG3_APE_LOCK_PHY3:
698                         bit = APE_LOCK_GRANT_DRIVER;
699                         break;
700                 default:
701                         if (!tp->pci_fn)
702                                 bit = APE_LOCK_GRANT_DRIVER;
703                         else
704                                 bit = 1 << tp->pci_fn;
705                 }
706                 tg3_ape_write32(tp, regbase + 4 * i, bit);
707         }
708
709 }
710
/* Acquire one of the APE (Application Processing Engine) hardware locks
 * that arbitrate access between the host driver and the management
 * firmware.
 *
 * Returns 0 on success (or trivially when the APE is not enabled),
 * -EINVAL for an unknown lock number, or -EBUSY if the grant did not
 * appear within ~1 ms.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* 5761 does not arbitrate the GPIO lock. */
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                /* PCI function 0 requests with the DRIVER bit; other
                 * functions use their own per-function request bit.
                 */
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        /* The 5761 uses a different register layout for the lock block. */
        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                /* Bail out early if the device fell off the bus. */
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
773
/* Release an APE lock previously acquired with tg3_ape_lock().  The
 * lock-number-to-bit mapping mirrors tg3_ape_lock(); writing our bit to
 * the GRANT register drops the grant.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* 5761 does not arbitrate the GPIO lock. */
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                /* PCI function 0 holds the DRIVER bit; other functions
                 * hold their own per-function bit.
                 */
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                /* Unknown lock numbers are silently ignored on unlock. */
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
810
/* Take the APE MEM lock and wait (up to timeout_us microseconds, in
 * 10 us steps) for the firmware to clear any pending driver event.
 *
 * On success (return 0) the MEM lock is left HELD for the caller, who
 * is expected to post an event and then release it.  On -EBUSY the
 * lock is not held.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                /* Event still pending: drop the lock and retry. */
                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}
831
832 #ifdef CONFIG_TIGON3_HWMON
/* Poll (up to timeout_us microseconds, in 10 us steps) for the APE to
 * clear the EVENT_PENDING bit, i.e. to consume the previously posted
 * driver event.  Returns 0 once the event was consumed, nonzero on
 * timeout.
 */
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        /* True (timeout) only if the loop ran to completion. */
        return i == timeout_us / 10;
}
848
/* Read 'len' bytes starting at APE offset 'base_off' into 'data' via
 * the APE scratchpad message buffer, in chunks limited by the buffer
 * size advertised by the firmware.
 *
 * Protocol per chunk: post a SCRTCHPD_READ event describing the chunk,
 * ring APE_EVENT_1, wait for the firmware to service it, then copy the
 * result out of the message area word by word.
 *
 * Returns 0 on success (including the no-op non-NCSI case), -ENODEV if
 * the APE segment signature is missing, -EAGAIN if the firmware is not
 * ready or times out, or the error from tg3_ape_event_lock().
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        /* Validate that APE firmware is present and alive. */
        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Locate the firmware-advertised message buffer.  The first two
         * words of the buffer carry the request (offset, length); the
         * payload area starts right after them.
         */
        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                /* Re-check readiness before each chunk. */
                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                /* Describe this chunk and post the read event.  The MEM
                 * lock is held here (taken by tg3_ape_event_lock).
                 */
                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                /* Allow up to 30 ms for the firmware to fill the buffer. */
                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                /* Copy the chunk out a word at a time. */
                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
912 #endif
913
/* Post a single driver event to the APE firmware: verify the firmware
 * is alive, wait for any previous event to drain, write the event code
 * (with EVENT_PENDING set) and ring the APE doorbell.
 *
 * Returns 0 on success, -EAGAIN if the firmware is absent/not ready,
 * or the error from tg3_ape_event_lock().
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        /* MEM lock is held here; publish the event, drop the lock, then
         * ring the doorbell.
         */
        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}
940
/* Inform the APE firmware of a driver state transition.
 *
 * RESET_KIND_INIT publishes the host segment signature/length, bumps
 * the init count, records the driver version and behavior flags, and
 * announces STATE_START.  RESET_KIND_SHUTDOWN records either the WOL
 * or UNLOAD state (depending on wakeup configuration) and announces
 * STATE_UNLOAD.  Other kinds are ignored.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                /* Count driver initializations across resets. */
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        /* WOL active: tell APE to keep the link at auto speed. */
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        /* Best effort: failure to notify the APE is not reported. */
        tg3_ape_send_event(tp, event);
}
988
989 static void tg3_send_ape_heartbeat(struct tg3 *tp,
990                                    unsigned long interval)
991 {
992         /* Check if hb interval has exceeded */
993         if (!tg3_flag(tp, ENABLE_APE) ||
994             time_before(jiffies, tp->ape_hb_jiffies + interval))
995                 return;
996
997         tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
998         tp->ape_hb_jiffies = jiffies;
999 }
1000
/* Disable chip interrupts: mask the PCI interrupt in MISC_HOST_CTRL,
 * then write 1 to every vector's interrupt mailbox (all irq_max of
 * them, not just the active irq_cnt).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
1010
/* Re-enable chip interrupts after tg3_disable_ints()/irq quiescing:
 * clear irq_sync, unmask the PCI interrupt, acknowledge each vector's
 * last status tag via its mailbox, and kick the coalescing engine so
 * any already-pending work raises an interrupt.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        /* Make irq_sync visible before interrupts can fire. */
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                /* 1-shot MSI requires the mailbox write to be repeated. */
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1041
1042 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1043 {
1044         struct tg3 *tp = tnapi->tp;
1045         struct tg3_hw_status *sblk = tnapi->hw_status;
1046         unsigned int work_exists = 0;
1047
1048         /* check for phy events */
1049         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1050                 if (sblk->status & SD_STATUS_LINK_CHG)
1051                         work_exists = 1;
1052         }
1053
1054         /* check for TX work to do */
1055         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1056                 work_exists = 1;
1057
1058         /* check for RX work to do */
1059         if (tnapi->rx_rcb_prod_idx &&
1060             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1061                 work_exists = 1;
1062
1063         return work_exists;
1064 }
1065
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        /* Acknowledge processed work by writing the last status tag. */
        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1085
/* Switch the chip's core clock source by stepping TG3PCI_CLOCK_CTRL
 * through the sequence the hardware requires (intermediate ALTCLK
 * states for 44 MHz-core parts) and cache the final value in
 * tp->pci_clock_ctrl.  No-op on CPMU-equipped or 5780-class chips.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        /* Preserve only the CLKRUN bits and the low divider field. */
        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Two-step transition through ALTCLK, 40 us settle each. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1118
1119 #define PHY_BUSY_LOOPS  5000
1120
/* Read PHY register 'reg' at MII address 'phy_addr' through the MAC's
 * MDIO interface, storing the 16-bit result in *val.
 *
 * Auto-polling is temporarily disabled (and restored afterwards) so
 * the MI_COM register is not contended.  The PHY APE lock serializes
 * access against the management firmware.  Completion is polled for up
 * to PHY_BUSY_LOOPS * 10 us; returns 0 on success, -EBUSY on timeout.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        /* Build the MDIO read frame: PHY address, register, READ+START. */
        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle to pick up the data. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}
1174
/* Read a register of the primary PHY (tp->phy_addr). */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1179
/* Write 'val' to PHY register 'reg' at MII address 'phy_addr' through
 * the MAC's MDIO interface.
 *
 * Writes to MII_CTRL1000 and MII_TG3_AUX_CTRL are silently skipped on
 * FET-style PHYs, which lack those registers.  Auto-polling handling,
 * APE locking, busy-polling and return values mirror __tg3_readphy().
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        /* Build the MDIO write frame: address, register, data, WRITE+START. */
        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}
1233
/* Write a register of the primary PHY (tp->phy_addr). */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1238
/* Clause-45 indirect write: select the MMD device and register address
 * through MII_TG3_MMD_CTRL/MII_TG3_MMD_ADDRESS, switch to no-increment
 * data mode, then write the data word.  Returns the first error from
 * the underlying clause-22 writes, or 0.
 */
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        /* Switch the address register pair to data (no-increment) mode. */
        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
1261
/* Clause-45 indirect read: same MMD select sequence as
 * tg3_phy_cl45_write(), finishing with a read of the data word into
 * *val.  Returns the first error encountered, or 0.
 */
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        /* Switch the address register pair to data (no-increment) mode. */
        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
1284
1285 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1286 {
1287         int err;
1288
1289         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1290         if (!err)
1291                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1292
1293         return err;
1294 }
1295
1296 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1297 {
1298         int err;
1299
1300         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1301         if (!err)
1302                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1303
1304         return err;
1305 }
1306
/* Read an AUX_CTRL shadow register: write the read-select value for
 * 'reg' (with the MISC shadow selector), then read the result back
 * from MII_TG3_AUX_CTRL into *val.
 */
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}
1319
1320 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1321 {
1322         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1323                 set |= MII_TG3_AUXCTL_MISC_WREN;
1324
1325         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1326 }
1327
1328 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1329 {
1330         u32 val;
1331         int err;
1332
1333         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1334
1335         if (err)
1336                 return err;
1337
1338         if (enable)
1339                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1340         else
1341                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1342
1343         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1344                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1345
1346         return err;
1347 }
1348
/* Write a MISC shadow register: combine selector, value, and the WREN
 * bit into a single MII_TG3_MISC_SHDW write.
 */
static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}
1354
/* Software-reset the PHY via BMCR and wait for the reset to complete.
 * Returns 0 on success, -EBUSY if any MDIO access fails or the
 * BMCR_RESET bit does not clear within ~50 ms (5000 * 10 us polls).
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        /* Reset complete; allow the PHY a short settle. */
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        /* limit is -1 only if the loop ran to exhaustion. */
        if (limit < 0)
                return -EBUSY;

        return 0;
}
1385
/* phylib mii_bus read callback: read 'reg' of PHY 'mii_id' under
 * tp->lock.  Returns the register value, or -EIO (stored through the
 * u32 and converted back on return) if the MDIO access failed.
 */
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}
1400
1401 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1402 {
1403         struct tg3 *tp = bp->priv;
1404         u32 ret = 0;
1405
1406         spin_lock_bh(&tp->lock);
1407
1408         if (__tg3_writephy(tp, mii_id, reg, val))
1409                 ret = -EIO;
1410
1411         spin_unlock_bh(&tp->lock);
1412
1413         return ret;
1414 }
1415
/* Program the 5785 MAC's PHY configuration registers (LED modes,
 * RGMII in-band signalling, clock timeouts) to match the attached
 * PHY type and interface mode.  PHYs not in the switch below are
 * left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
        /* Pick the LED mode bits appropriate for the attached PHY. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        /* Non-RGMII interfaces only need LED modes and clock timeouts. */
        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        /* RGMII with in-band status: enable the full mask/enable set. */
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        /* Mirror the in-band settings into the external RGMII mode reg. */
        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}
1496
/* Start MDIO operation: turn off MAC auto-polling so the driver owns
 * the MDIO interface, and reapply the 5785 PHY configuration when the
 * MDIO bus has already been registered.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}
1507
/* Determine the PHY's MII address, start the MDIO interface, and (when
 * phylib is in use) allocate and register an mii_bus, then apply
 * per-PHY interface mode and dev_flags quirks.
 *
 * Returns 0 on success, a negative errno on allocation/registration
 * failure or when no usable PHY is found.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                /* 5717-class parts map PHY address from the PCI function,
                 * with serdes PHYs offset by 7.
                 */
                tp->phy_addr = tp->pci_fn + 1;

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
                int addr;

                /* SSB cores behind a robo switch get their address from
                 * the ssb layer.
                 */
                addr = ssb_gige_get_phyaddr(tp->pdev);
                if (addr < 0)
                        return addr;
                tp->phy_addr = addr;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        /* Only probe the one address our PHY lives at. */
        tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state..
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        /* Apply per-PHY interface mode and flag quirks. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fall through */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}
1614
1615 static void tg3_mdio_fini(struct tg3 *tp)
1616 {
1617         if (tg3_flag(tp, MDIOBUS_INITED)) {
1618                 tg3_flag_clear(tp, MDIOBUS_INITED);
1619                 mdiobus_unregister(tp->mdio_bus);
1620                 mdiobus_free(tp->mdio_bus);
1621         }
1622 }
1623
/* tp->lock is held. */
/* Raise the RX CPU driver-event bit to signal the firmware, and record
 * when it was raised so tg3_wait_for_event_ack() can bound its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}
1635
1636 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1637
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary.  The window
	 * is TG3_FW_EVENT_TIMEOUT_USEC measured from last_event_jiffies
	 * (set by tg3_generate_fw_event()).
	 */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time: only the unexpired part
	 * of the window needs to be polled.  The >> 3 converts the
	 * remaining microseconds into 8-usec poll iterations.
	 */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		/* Firmware ACKs by clearing the driver-event bit. */
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		/* Stop polling if the PCI device has dropped off the bus. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1667
1668 /* tp->lock is held. */
1669 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1670 {
1671         u32 reg, val;
1672
1673         val = 0;
1674         if (!tg3_readphy(tp, MII_BMCR, &reg))
1675                 val = reg << 16;
1676         if (!tg3_readphy(tp, MII_BMSR, &reg))
1677                 val |= (reg & 0xffff);
1678         *data++ = val;
1679
1680         val = 0;
1681         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1682                 val = reg << 16;
1683         if (!tg3_readphy(tp, MII_LPA, &reg))
1684                 val |= (reg & 0xffff);
1685         *data++ = val;
1686
1687         val = 0;
1688         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1689                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1690                         val = reg << 16;
1691                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1692                         val |= (reg & 0xffff);
1693         }
1694         *data++ = val;
1695
1696         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1697                 val = reg << 16;
1698         else
1699                 val = 0;
1700         *data++ = val;
1701 }
1702
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	/* Only 5780-class parts running ASF management firmware consume
	 * these link updates.
	 */
	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	/* Snapshot the PHY state (BMCR/BMSR, advertisements, 1000T
	 * regs, PHY address) into four 32-bit words.
	 */
	tg3_phy_gather_ump_data(tp, data);

	/* Make sure the firmware has ACKed the previous event before
	 * the mailbox is reused.
	 */
	tg3_wait_for_event_ack(tp);

	/* Fill in the command mailbox: command code, length, payload.
	 * NOTE(review): the length written is 14 although four 32-bit
	 * words (16 bytes) of payload follow — presumably what the
	 * firmware expects; confirm against the firmware interface.
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	/* Raise the driver event so the firmware reads the mailbox. */
	tg3_generate_fw_event(tp);
}
1724
1725 /* tp->lock is held. */
1726 static void tg3_stop_fw(struct tg3 *tp)
1727 {
1728         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1729                 /* Wait for RX cpu to ACK the previous event. */
1730                 tg3_wait_for_event_ack(tp);
1731
1732                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1733
1734                 tg3_generate_fw_event(tp);
1735
1736                 /* Wait for RX cpu to ACK this event. */
1737                 tg3_wait_for_event_ack(tp);
1738         }
1739 }
1740
1741 /* tp->lock is held. */
1742 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1743 {
1744         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1745                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1746
1747         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1748                 switch (kind) {
1749                 case RESET_KIND_INIT:
1750                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751                                       DRV_STATE_START);
1752                         break;
1753
1754                 case RESET_KIND_SHUTDOWN:
1755                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1756                                       DRV_STATE_UNLOAD);
1757                         break;
1758
1759                 case RESET_KIND_SUSPEND:
1760                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1761                                       DRV_STATE_SUSPEND);
1762                         break;
1763
1764                 default:
1765                         break;
1766                 }
1767         }
1768 }
1769
1770 /* tp->lock is held. */
1771 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1772 {
1773         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1774                 switch (kind) {
1775                 case RESET_KIND_INIT:
1776                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777                                       DRV_STATE_START_DONE);
1778                         break;
1779
1780                 case RESET_KIND_SHUTDOWN:
1781                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782                                       DRV_STATE_UNLOAD_DONE);
1783                         break;
1784
1785                 default:
1786                         break;
1787                 }
1788         }
1789 }
1790
1791 /* tp->lock is held. */
1792 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1793 {
1794         if (tg3_flag(tp, ENABLE_ASF)) {
1795                 switch (kind) {
1796                 case RESET_KIND_INIT:
1797                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798                                       DRV_STATE_START);
1799                         break;
1800
1801                 case RESET_KIND_SHUTDOWN:
1802                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1803                                       DRV_STATE_UNLOAD);
1804                         break;
1805
1806                 case RESET_KIND_SUSPEND:
1807                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1808                                       DRV_STATE_SUSPEND);
1809                         break;
1810
1811                 default:
1812                         break;
1813                 }
1814         }
1815 }
1816
/* Wait for the bootcode/firmware to finish initializing after a reset.
 * Returns 0 on success or when no firmware is expected, -ENODEV when
 * the 5906 VCPU never signals init-done or the device falls off the bus.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	/* A previous poll already established that no firmware is
	 * present; don't wait again.
	 */
	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The firmware
	 * signals completion by writing the one's complement of the
	 * magic value into its mailbox; poll up to 100000 * 10 usec.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			/* Device is gone; report the missing firmware
			 * once and stop polling.
			 */
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1880
1881 static void tg3_link_report(struct tg3 *tp)
1882 {
1883         if (!netif_carrier_ok(tp->dev)) {
1884                 netif_info(tp, link, tp->dev, "Link is down\n");
1885                 tg3_ump_link_report(tp);
1886         } else if (netif_msg_link(tp)) {
1887                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1888                             (tp->link_config.active_speed == SPEED_1000 ?
1889                              1000 :
1890                              (tp->link_config.active_speed == SPEED_100 ?
1891                               100 : 10)),
1892                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1893                              "full" : "half"));
1894
1895                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1896                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1897                             "on" : "off",
1898                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1899                             "on" : "off");
1900
1901                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1902                         netdev_info(tp->dev, "EEE is %s\n",
1903                                     tp->setlpicnt ? "enabled" : "disabled");
1904
1905                 tg3_ump_link_report(tp);
1906         }
1907
1908         tp->link_up = netif_carrier_ok(tp->dev);
1909 }
1910
1911 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1912 {
1913         u32 flowctrl = 0;
1914
1915         if (adv & ADVERTISE_PAUSE_CAP) {
1916                 flowctrl |= FLOW_CTRL_RX;
1917                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1918                         flowctrl |= FLOW_CTRL_TX;
1919         } else if (adv & ADVERTISE_PAUSE_ASYM)
1920                 flowctrl |= FLOW_CTRL_TX;
1921
1922         return flowctrl;
1923 }
1924
1925 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1926 {
1927         u16 miireg;
1928
1929         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1930                 miireg = ADVERTISE_1000XPAUSE;
1931         else if (flow_ctrl & FLOW_CTRL_TX)
1932                 miireg = ADVERTISE_1000XPSE_ASYM;
1933         else if (flow_ctrl & FLOW_CTRL_RX)
1934                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1935         else
1936                 miireg = 0;
1937
1938         return miireg;
1939 }
1940
1941 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1942 {
1943         u32 flowctrl = 0;
1944
1945         if (adv & ADVERTISE_1000XPAUSE) {
1946                 flowctrl |= FLOW_CTRL_RX;
1947                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1948                         flowctrl |= FLOW_CTRL_TX;
1949         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1950                 flowctrl |= FLOW_CTRL_TX;
1951
1952         return flowctrl;
1953 }
1954
1955 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1956 {
1957         u8 cap = 0;
1958
1959         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1960                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1961         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1962                 if (lcladv & ADVERTISE_1000XPAUSE)
1963                         cap = FLOW_CTRL_RX;
1964                 if (rmtadv & ADVERTISE_1000XPAUSE)
1965                         cap = FLOW_CTRL_TX;
1966         }
1967
1968         return cap;
1969 }
1970
/* Resolve the active pause configuration from the local/remote
 * advertisements and program the MAC RX/TX flow-control enables.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* Autoneg state lives in phylib when USE_PHYLIB is set,
	 * otherwise in the driver's own link_config.
	 */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Resolve pause from the exchanged advertisements:
		 * 1000BASE-X encoding on serdes links, standard MII
		 * encoding otherwise.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;	/* forced setting */

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the hardware when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
2009
/* phylib link-change callback: mirror the PHY's negotiated state into
 * the MAC (port mode, duplex, flow control, TX timings) and log link
 * changes.  Takes tp->lock itself; tg3_link_report() runs unlocked.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* MII port mode for 10/100, GMII for gigabit; the 5785
		 * is special-cased to fall back to MII otherwise.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build pause advertisements from
			 * our configured flow control and the partner's
			 * reported pause bits.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* 5785 needs the MI status register retuned for 10 Mbps. */
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Program IPG/slot-time: gigabit half duplex uses 0xff slot
	 * time, everything else 32.
	 */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Only report when something user-visible changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
2093
/* Connect the net device to its PHY via phylib and constrain the PHY's
 * advertised features to what the MAC supports.  Returns 0 if already
 * connected or on success, negative errno on failure.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		/* 10/100-only parts get the same limits as plain MII. */
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		/* Unsupported interface: undo the phy_connect() above. */
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}
2139
/* (Re)start the PHY state machine, restoring the saved link settings
 * first when the device is coming out of low-power mode.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Push the link parameters stashed in link_config back
		 * into the phy_device before restarting it.
		 */
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2162
2163 static void tg3_phy_stop(struct tg3 *tp)
2164 {
2165         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2166                 return;
2167
2168         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2169 }
2170
2171 static void tg3_phy_fini(struct tg3 *tp)
2172 {
2173         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2174                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2175                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2176         }
2177 }
2178
2179 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2180 {
2181         int err;
2182         u32 val;
2183
2184         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2185                 return 0;
2186
2187         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2188                 /* Cannot do read-modify-write on 5401 */
2189                 err = tg3_phy_auxctl_write(tp,
2190                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2191                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2192                                            0x4c20);
2193                 goto done;
2194         }
2195
2196         err = tg3_phy_auxctl_read(tp,
2197                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2198         if (err)
2199                 return err;
2200
2201         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2202         err = tg3_phy_auxctl_write(tp,
2203                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2204
2205 done:
2206         return err;
2207 }
2208
/* Enable/disable auto power-down (APD) on FET-style PHYs through the
 * FET_TEST shadow-register mechanism.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		/* Expose the shadow register bank. */
		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Restore the original (non-shadow) test register value. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
2228
/* Enable/disable the PHY's auto power-down feature where supported. */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	/* Only 5705+ parts; 5717+ with an MII serdes is excluded too. */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		/* FET PHYs use the shadow-register mechanism instead. */
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* 5784 with APD enabled leaves DLLAPD cleared. */
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

	/* Program the APD wake-up timer and the enable bit itself. */
	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
2259
/* Enable/disable automatic MDI/MDI-X crossover on copper PHYs. */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	/* Not applicable before the 5705 or on any serdes interface. */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow register bank, flip the MDIX
			 * bit, then restore the original test register.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Non-FET PHYs expose the bit through the AUXCTL
		 * miscellaneous shadow register.
		 */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2300
2301 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2302 {
2303         int ret;
2304         u32 val;
2305
2306         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2307                 return;
2308
2309         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2310         if (!ret)
2311                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2312                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2313 }
2314
/* Apply the PHY tuning values cached from the chip's OTP memory
 * (tp->phy_otp) to the DSP coefficient registers.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP registers need SMDSP access via AUXCTL; bail if that
	 * cannot be enabled.
	 */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Drop SMDSP access again. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2351
/* Read the current EEE state from the PHY and CPMU into *eee, or into
 * tp->eee when eee is NULL.  Bails out silently on PHY read errors,
 * leaving later fields untouched.
 */
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active: only these two resolution codes count. */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings (enabled == any
	 * advertisement bit set).
	 */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value (low 16 bits of debounce timer 1). */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
2391
/* Re-evaluate EEE/LPI state after a link change. */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	/* EEE only applies on an autonegotiated full-duplex 100 or
	 * 1000 Mbps link.
	 */
	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit timer depends on the link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		/* setlpicnt = 2 presumably lets the link settle before
		 * LPI is enabled elsewhere — confirm against the paths
		 * that decrement setlpicnt.
		 */
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not active: clear the DSP TAP26 coefficients and
		 * turn off the LPI enable in the CPMU.
		 */
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2431
2432 static void tg3_phy_eee_enable(struct tg3 *tp)
2433 {
2434         u32 val;
2435
2436         if (tp->link_config.active_speed == SPEED_1000 &&
2437             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2438              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2439              tg3_flag(tp, 57765_CLASS)) &&
2440             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2441                 val = MII_TG3_DSP_TAP26_ALNOKO |
2442                       MII_TG3_DSP_TAP26_RMRXSTO;
2443                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2444                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2445         }
2446
2447         val = tr32(TG3_CPMU_EEE_MODE);
2448         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2449 }
2450
2451 static int tg3_wait_macro_done(struct tg3 *tp)
2452 {
2453         int limit = 100;
2454
2455         while (limit--) {
2456                 u32 tmp32;
2457
2458                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2459                         if ((tmp32 & 0x1000) == 0)
2460                                 break;
2461                 }
2462         }
2463         if (limit < 0)
2464                 return -EBUSY;
2465
2466         return 0;
2467 }
2468
/* Write a fixed test pattern into each of the four PHY DSP channels,
 * read it back and compare.  Used after a PHY reset on 5703/4/5 parts
 * to verify the DSP memory.  On a macro-done timeout or a readback
 * mismatch, *resetp is set to 1 so the caller retries with a fresh
 * PHY reset.  Returns 0 on success, -EBUSY on failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the same block for readback. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Pattern entries come in low-word/high-word pairs; only
		 * the masked bits are significant on readback.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: leave the DSP in a known state
				 * before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2534
2535 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2536 {
2537         int chan;
2538
2539         for (chan = 0; chan < 4; chan++) {
2540                 int i;
2541
2542                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2543                              (chan * 0x2000) | 0x0200);
2544                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2545                 for (i = 0; i < 6; i++)
2546                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2547                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2548                 if (tg3_wait_macro_done(tp))
2549                         return -EBUSY;
2550         }
2551
2552         return 0;
2553 }
2554
2555 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2556 {
2557         u32 reg32, phy9_orig;
2558         int retries, do_phy_reset, err;
2559
2560         retries = 10;
2561         do_phy_reset = 1;
2562         do {
2563                 if (do_phy_reset) {
2564                         err = tg3_bmcr_reset(tp);
2565                         if (err)
2566                                 return err;
2567                         do_phy_reset = 0;
2568                 }
2569
2570                 /* Disable transmitter and interrupt.  */
2571                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2572                         continue;
2573
2574                 reg32 |= 0x3000;
2575                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2576
2577                 /* Set full-duplex, 1000 mbps.  */
2578                 tg3_writephy(tp, MII_BMCR,
2579                              BMCR_FULLDPLX | BMCR_SPEED1000);
2580
2581                 /* Set to master mode.  */
2582                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2583                         continue;
2584
2585                 tg3_writephy(tp, MII_CTRL1000,
2586                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2587
2588                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2589                 if (err)
2590                         return err;
2591
2592                 /* Block the PHY control access.  */
2593                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2594
2595                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2596                 if (!err)
2597                         break;
2598         } while (--retries);
2599
2600         err = tg3_phy_reset_chanpat(tp);
2601         if (err)
2602                 return err;
2603
2604         tg3_phydsp_write(tp, 0x8005, 0x0000);
2605
2606         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2607         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2608
2609         tg3_phy_toggle_auxctl_smdsp(tp, false);
2610
2611         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2612
2613         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2614         if (err)
2615                 return err;
2616
2617         reg32 &= ~0x3000;
2618         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2619
2620         return 0;
2621 }
2622
/* Mark the link down: drop the netdev carrier first, then the
 * driver's cached link state.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2628
2629 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2630 {
2631         if (tg3_flag(tp, ENABLE_ASF))
2632                 netdev_warn(tp->dev,
2633                             "Management side-band traffic will be interrupted during phy settings change\n");
2634 }
2635
/* Reset the tigon3 PHY and reapply the chip- and PHY-specific
 * workarounds that a reset wipes out (DSP patches, auxctl settings,
 * jumbo-frame bits, automdix and wirespeed).  Returns 0 on success
 * or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Bring the internal EPHY out of IDDQ (low power) mode
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR link status is latched-low, so read it twice to get the
	 * current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; report it down now. */
	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		/* These parts need the reset + DSP test pattern
		 * verification sequence instead of a plain BMCR reset.
		 */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		/* Temporarily clear the 10MB-RX-only CPMU bit for the
		 * duration of the reset; restored just below.
		 */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Undo the 12.5 MHz MAC clock setting applied when the
		 * PHY was powered down (see tg3_power_down_phy()).
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* DSP fixups for assorted PHY bugs; each is applied only if the
	 * auxctl SMDSP toggle succeeds.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* Written twice intentionally. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2779
/* Per-function GPIO power-state messages.  Each of the (up to four)
 * PCI functions owns a 4-bit nibble in a shared status word (see
 * tg3_set_function_status()), carrying a driver-present flag and a
 * Vaux-needed flag.  The ALL_* masks cover the flag across every
 * function's nibble.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2795
2796 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2797 {
2798         u32 status, shift;
2799
2800         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801             tg3_asic_rev(tp) == ASIC_REV_5719)
2802                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2803         else
2804                 status = tr32(TG3_CPMU_DRV_STATUS);
2805
2806         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2807         status &= ~(TG3_GPIO_MSG_MASK << shift);
2808         status |= (newstat << shift);
2809
2810         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2811             tg3_asic_rev(tp) == ASIC_REV_5719)
2812                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2813         else
2814                 tw32(TG3_CPMU_DRV_STATUS, status);
2815
2816         return status >> TG3_APE_GPIO_MSG_SHIFT;
2817 }
2818
2819 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2820 {
2821         if (!tg3_flag(tp, IS_NIC))
2822                 return 0;
2823
2824         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2825             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2826             tg3_asic_rev(tp) == ASIC_REV_5720) {
2827                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2828                         return -EIO;
2829
2830                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2831
2832                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2833                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2834
2835                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2836         } else {
2837                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2838                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2839         }
2840
2841         return 0;
2842 }
2843
/* Remain on main power: pulse GPIO OUTPUT1 high-low-high (with OE1
 * enabled and a settle delay after each write).  Skipped for non-NIC
 * devices and for 5700/5701.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2867
/* Switch the board to auxiliary power by stepping the GRC GPIO pins
 * in the order each board family requires; every write is followed by
 * a power-switch settle delay.  No-op for non-NIC devices.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* 5700/5701: a single write asserting GPIO 0 and 1. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO 0 after GPIO 1/2 are established. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			/* Finally drop GPIO 2 again. */
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2944
/* Power-source selection for 5717-class devices.  The GPIO pins are
 * shared between PCI functions, so each function publishes its needs
 * in the shared message word; only when no other function has a
 * driver present does this function actually flip the power switch.
 * @wol_enable: this function needs Vaux for Wake-on-LAN.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	/* Another function still has its driver present; leave the
	 * power source to it.
	 */
	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2969
/* Decide between auxiliary power (Vaux) and main power based on
 * whether this device or its peer function needs power while down
 * (Wake-on-LAN when @include_wol, and/or ASF management firmware).
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		/* These parts coordinate via the shared GPIO message. */
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* A fully initialized peer manages the power
			 * source itself; do not interfere.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
3013
3014 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3015 {
3016         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3017                 return 1;
3018         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3019                 if (speed != SPEED_10)
3020                         return 1;
3021         } else if (speed == SPEED_10)
3022                 return 1;
3023
3024         return 0;
3025 }
3026
3027 static bool tg3_phy_power_bug(struct tg3 *tp)
3028 {
3029         switch (tg3_asic_rev(tp)) {
3030         case ASIC_REV_5700:
3031         case ASIC_REV_5704:
3032                 return true;
3033         case ASIC_REV_5780:
3034                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3035                         return true;
3036                 return false;
3037         case ASIC_REV_5717:
3038                 if (!tp->pci_fn)
3039                         return true;
3040                 return false;
3041         case ASIC_REV_5719:
3042         case ASIC_REV_5720:
3043                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3044                     !tp->pci_fn)
3045                         return true;
3046                 return false;
3047         }
3048
3049         return false;
3050 }
3051
3052 static bool tg3_phy_led_bug(struct tg3 *tp)
3053 {
3054         switch (tg3_asic_rev(tp)) {
3055         case ASIC_REV_5719:
3056         case ASIC_REV_5720:
3057                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3058                     !tp->pci_fn)
3059                         return true;
3060                 return false;
3061         }
3062
3063         return false;
3064 }
3065
/* Power down the PHY (or put it in its lowest safe state) ahead of a
 * device power-down.  @do_low_power additionally enables the deeper
 * auxctl-based power savings on non-FET copper PHYs.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	/* Caller wants the link kept up across the power-down; leave
	 * the PHY alone entirely.
	 */
	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			/* Hold the 5704 SERDES in soft reset with HW
			 * autoneg selected.
			 */
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Internal EPHY: reset it and drop it into IDDQ
		 * (low power) mode.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Restart autoneg with nothing advertised. */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Set standby power-down via the shadow
			 * register set.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Slow the 1000MB MAC clock to 12.5 MHz before powering
		 * down (undone in tg3_phy_reset()).
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3139
/* tp->lock is held.  Acquire the NVRAM software arbitration lock.
 * Recursive via nvram_lock_cnt: only the first acquisition performs
 * the hardware handshake (request SWARB_REQ_SET1, wait for
 * SWARB_GNT1).  Returns 0 on success, -ENODEV if the grant does not
 * arrive within 8000 * 20 us (~160 ms).
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Timed out: withdraw the request. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
3162
3163 /* tp->lock is held. */
3164 static void tg3_nvram_unlock(struct tg3 *tp)
3165 {
3166         if (tg3_flag(tp, NVRAM)) {
3167                 if (tp->nvram_lock_cnt > 0)
3168                         tp->nvram_lock_cnt--;
3169                 if (tp->nvram_lock_cnt == 0)
3170                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3171         }
3172 }
3173
3174 /* tp->lock is held. */
3175 static void tg3_enable_nvram_access(struct tg3 *tp)
3176 {
3177         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178                 u32 nvaccess = tr32(NVRAM_ACCESS);
3179
3180                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3181         }
3182 }
3183
3184 /* tp->lock is held. */
3185 static void tg3_disable_nvram_access(struct tg3 *tp)
3186 {
3187         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3188                 u32 nvaccess = tr32(NVRAM_ACCESS);
3189
3190                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3191         }
3192 }
3193
/* Read one 32-bit word through the legacy SEEPROM interface (the path
 * taken when the NVRAM flag is not set).  @offset must be 32-bit
 * aligned and within EEPROM_ADDR_ADDR_MASK.  Polls up to ~1 s for
 * completion.  Returns 0 on success and stores the word in *val.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Clear the address, device-id and read-control fields, then
	 * program the target address and start a read.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Wait for the completion flag, polling once per millisecond. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3233
/* Maximum number of 10-40 us polls for NVRAM_CMD_DONE. */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue @nvram_cmd to the NVRAM controller and poll for completion.
 * Returns 0 once NVRAM_CMD_DONE is set, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Extra settle time after completion. */
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
3254
3255 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3256 {
3257         if (tg3_flag(tp, NVRAM) &&
3258             tg3_flag(tp, NVRAM_BUFFERED) &&
3259             tg3_flag(tp, FLASH) &&
3260             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3261             (tp->nvram_jedecnum == JEDEC_ATMEL))
3262
3263                 addr = ((addr / tp->nvram_pagesize) <<
3264                         ATMEL_AT45DB0X1B_PAGE_POS) +
3265                        (addr % tp->nvram_pagesize);
3266
3267         return addr;
3268 }
3269
3270 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3271 {
3272         if (tg3_flag(tp, NVRAM) &&
3273             tg3_flag(tp, NVRAM_BUFFERED) &&
3274             tg3_flag(tp, FLASH) &&
3275             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3276             (tp->nvram_jedecnum == JEDEC_ATMEL))
3277
3278                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3279                         tp->nvram_pagesize) +
3280                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3281
3282         return addr;
3283 }
3284
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word from NVRAM at logical @offset.  Falls back to
 * the SEEPROM interface when no NVRAM is present.  Takes and releases
 * the NVRAM arbitration lock and access-enable around the command.
 * Returns 0 on success and stores the word in *val.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate for page-addressed Atmel parts. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3322
3323 /* Ensures NVRAM data is in bytestream format. */
3324 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3325 {
3326         u32 v;
3327         int res = tg3_nvram_read(tp, offset, &v);
3328         if (!res)
3329                 *val = cpu_to_be32(v);
3330         return res;
3331 }
3332
/* Write a dword-aligned buffer through the legacy SEEPROM interface,
 * one 32-bit word at a time, polling for completion of each word.
 * Returns 0 on success or -EBUSY if a word fails to complete.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1s for this word's write to complete. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3381
/* offset and length are dword aligned.
 *
 * Unbuffered flash parts can only be programmed a full page at a time,
 * so each affected page is read back in full, patched with the caller's
 * data, erased, and rewritten word by word.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Page staging buffer for the read-modify-write cycle. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		/* Read the entire page containing this offset. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back one word at a time, flagging
		 * the first and last words of the burst for the controller.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Re-assert the part's write-disable latch, success or not. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3480
/* offset and length are dword aligned.
 *
 * Buffered parts absorb single-word writes, so no page-level
 * read-modify-write is required; only the FIRST/LAST burst flags must be
 * managed across page boundaries.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark the start of each page (or of the whole transfer)
		 * and the end of each page / end of the transfer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Some configurations latch the address once per burst;
		 * the rest need it reloaded for every word.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older chips with ST flash need an explicit write-enable
		 * command before the first word of each burst.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3535
3536 /* offset and length are dword aligned */
3537 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3538 {
3539         int ret;
3540
3541         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3542                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3543                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3544                 udelay(40);
3545         }
3546
3547         if (!tg3_flag(tp, NVRAM)) {
3548                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3549         } else {
3550                 u32 grc_mode;
3551
3552                 ret = tg3_nvram_lock(tp);
3553                 if (ret)
3554                         return ret;
3555
3556                 tg3_enable_nvram_access(tp);
3557                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3558                         tw32(NVRAM_WRITE1, 0x406);
3559
3560                 grc_mode = tr32(GRC_MODE);
3561                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3562
3563                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3564                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3565                                 buf);
3566                 } else {
3567                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3568                                 buf);
3569                 }
3570
3571                 grc_mode = tr32(GRC_MODE);
3572                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3573
3574                 tg3_disable_nvram_access(tp);
3575                 tg3_nvram_unlock(tp);
3576         }
3577
3578         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3579                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3580                 udelay(40);
3581         }
3582
3583         return ret;
3584 }
3585
/* On-chip scratch memory windows used to stage firmware for the RX and
 * TX RISC CPUs (see tg3_load_firmware_cpu()).
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
3590
3591 /* tp->lock is held. */
3592 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3593 {
3594         int i;
3595         const int iters = 10000;
3596
3597         for (i = 0; i < iters; i++) {
3598                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3599                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3600                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3601                         break;
3602                 if (pci_channel_offline(tp->pdev))
3603                         return -EBUSY;
3604         }
3605
3606         return (i == iters) ? -EBUSY : 0;
3607 }
3608
3609 /* tp->lock is held. */
3610 static int tg3_rxcpu_pause(struct tg3 *tp)
3611 {
3612         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3613
3614         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3615         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3616         udelay(10);
3617
3618         return rc;
3619 }
3620
3621 /* tp->lock is held. */
3622 static int tg3_txcpu_pause(struct tg3 *tp)
3623 {
3624         return tg3_pause_cpu(tp, TX_CPU_BASE);
3625 }
3626
3627 /* tp->lock is held. */
3628 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3629 {
3630         tw32(cpu_base + CPU_STATE, 0xffffffff);
3631         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3632 }
3633
3634 /* tp->lock is held. */
3635 static void tg3_rxcpu_resume(struct tg3 *tp)
3636 {
3637         tg3_resume_cpu(tp, RX_CPU_BASE);
3638 }
3639
/* tp->lock is held.
 *
 * Halt the requested RISC CPU, handling the chip variants that do it
 * differently.  Returns 0 on success or -ENODEV on timeout.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705-class chips have no TX RISC CPU; asking for it is a bug. */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 halts its virtual CPU through GRC_VCPU_EXT_CTRL. */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3677
3678 static int tg3_fw_data_len(struct tg3 *tp,
3679                            const struct tg3_firmware_hdr *fw_hdr)
3680 {
3681         int fw_len;
3682
3683         /* Non fragmented firmware have one firmware header followed by a
3684          * contiguous chunk of data to be written. The length field in that
3685          * header is not the length of data to be written but the complete
3686          * length of the bss. The data length is determined based on
3687          * tp->fw->size minus headers.
3688          *
3689          * Fragmented firmware have a main header followed by multiple
3690          * fragments. Each fragment is identical to non fragmented firmware
3691          * with a firmware header followed by a contiguous chunk of data. In
3692          * the main header, the length field is unused and set to 0xffffffff.
3693          * In each fragment header the length is the entire size of that
3694          * fragment i.e. fragment data + header length. Data length is
3695          * therefore length field in the header minus TG3_FW_HDR_LEN.
3696          */
3697         if (tp->fw_len == 0xffffffff)
3698                 fw_len = be32_to_cpu(fw_hdr->len);
3699         else
3700                 fw_len = tp->fw->size;
3701
3702         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3703 }
3704
/* tp->lock is held.
 *
 * Copy a firmware image (possibly fragmented, see tg3_fw_data_len())
 * into a CPU's scratch memory.  On non-57766 chips the CPU is halted
 * and its scratch area zeroed first; on the 57766 the caller is
 * expected to have paused the CPU already.
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory-write primitive appropriate for this chip. */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the scratch area and keep the CPU halted. */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		/* A fragment's data words immediately follow its header. */
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3770
/* tp->lock is held.
 *
 * Set a CPU's program counter, retrying the halt + PC-write sequence
 * until the PC reads back as expected.  Returns -EBUSY on timeout.
 */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		/* PC didn't stick; halt the CPU and write it again. */
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
3791
3792 /* tp->lock is held. */
3793 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3794 {
3795         const struct tg3_firmware_hdr *fw_hdr;
3796         int err;
3797
3798         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3799
3800         /* Firmware blob starts with version numbers, followed by
3801            start address and length. We are setting complete length.
3802            length = end_address_of_bss - start_address_of_text.
3803            Remainder is the blob to be loaded contiguously
3804            from start address. */
3805
3806         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3807                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3808                                     fw_hdr);
3809         if (err)
3810                 return err;
3811
3812         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3813                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3814                                     fw_hdr);
3815         if (err)
3816                 return err;
3817
3818         /* Now startup only the RX cpu. */
3819         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3820                                        be32_to_cpu(fw_hdr->base_addr));
3821         if (err) {
3822                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3823                            "should be %08x\n", __func__,
3824                            tr32(RX_CPU_BASE + CPU_PC),
3825                                 be32_to_cpu(fw_hdr->base_addr));
3826                 return -ENODEV;
3827         }
3828
3829         tg3_rxcpu_resume(tp);
3830
3831         return 0;
3832 }
3833
3834 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3835 {
3836         const int iters = 1000;
3837         int i;
3838         u32 val;
3839
3840         /* Wait for boot code to complete initialization and enter service
3841          * loop. It is then safe to download service patches
3842          */
3843         for (i = 0; i < iters; i++) {
3844                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3845                         break;
3846
3847                 udelay(10);
3848         }
3849
3850         if (i == iters) {
3851                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3852                 return -EBUSY;
3853         }
3854
3855         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3856         if (val & 0xff) {
3857                 netdev_warn(tp->dev,
3858                             "Other patches exist. Not downloading EEE patch\n");
3859                 return -EEXIST;
3860         }
3861
3862         return 0;
3863 }
3864
/* tp->lock is held.
 *
 * Download the 57766 EEE service patch.  Note the guard below only
 * proceeds when the NO_NVRAM flag IS set — this path supplies firmware
 * from the driver when the device has no NVRAM of its own.  Failures
 * are silent by design: the patch is optional.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	/* Restart the CPU so the patch takes effect. */
	tg3_rxcpu_resume(tp);
}
3905
/* tp->lock is held.
 *
 * Download TSO firmware into a RISC CPU and start it.  A no-op (returns
 * 0) when the chip does not use firmware-based TSO.  Returns a negative
 * errno on load or startup failure.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	/* The 5705 runs TSO firmware on the RX CPU out of MBUF pool
	 * memory; all other chips use the TX CPU scratch area.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3955
3956 /* tp->lock is held. */
3957 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3958 {
3959         u32 addr_high, addr_low;
3960
3961         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3962         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3963                     (mac_addr[4] <<  8) | mac_addr[5]);
3964
3965         if (index < 4) {
3966                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3967                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3968         } else {
3969                 index -= 4;
3970                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3971                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3972         }
3973 }
3974
3975 /* tp->lock is held. */
3976 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3977 {
3978         u32 addr_high;
3979         int i;
3980
3981         for (i = 0; i < 4; i++) {
3982                 if (i == 1 && skip_mac_1)
3983                         continue;
3984                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3985         }
3986
3987         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3988             tg3_asic_rev(tp) == ASIC_REV_5704) {
3989                 for (i = 4; i < 16; i++)
3990                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3991         }
3992
3993         addr_high = (tp->dev->dev_addr[0] +
3994                      tp->dev->dev_addr[1] +
3995                      tp->dev->dev_addr[2] +
3996                      tp->dev->dev_addr[3] +
3997                      tp->dev->dev_addr[4] +
3998                      tp->dev->dev_addr[5]) &
3999                 TX_BACKOFF_SEED_MASK;
4000         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4001 }
4002
4003 static void tg3_enable_register_access(struct tg3 *tp)
4004 {
4005         /*
4006          * Make sure register accesses (indirect or otherwise) will function
4007          * correctly.
4008          */
4009         pci_write_config_dword(tp->pdev,
4010                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4011 }
4012
4013 static int tg3_power_up(struct tg3 *tp)
4014 {
4015         int err;
4016
4017         tg3_enable_register_access(tp);
4018
4019         err = pci_set_power_state(tp->pdev, PCI_D0);
4020         if (!err) {
4021                 /* Switch out of Vaux if it is a NIC */
4022                 tg3_pwrsrc_switch_to_vmain(tp);
4023         } else {
4024                 netdev_err(tp->dev, "Transition to D0 failed\n");
4025         }
4026
4027         return err;
4028 }
4029
4030 static int tg3_setup_phy(struct tg3 *, bool);
4031
4032 static int tg3_power_down_prepare(struct tg3 *tp)
4033 {
4034         u32 misc_host_ctrl;
4035         bool device_should_wake, do_low_power;
4036
4037         tg3_enable_register_access(tp);
4038
4039         /* Restore the CLKREQ setting. */
4040         if (tg3_flag(tp, CLKREQ_BUG))
4041                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4042                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4043
4044         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4045         tw32(TG3PCI_MISC_HOST_CTRL,
4046              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4047
4048         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4049                              tg3_flag(tp, WOL_ENABLE);
4050
4051         if (tg3_flag(tp, USE_PHYLIB)) {
4052                 do_low_power = false;
4053                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4054                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4055                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4056                         struct phy_device *phydev;
4057                         u32 phyid;
4058
4059                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4060
4061                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4062
4063                         tp->link_config.speed = phydev->speed;
4064                         tp->link_config.duplex = phydev->duplex;
4065                         tp->link_config.autoneg = phydev->autoneg;
4066                         ethtool_convert_link_mode_to_legacy_u32(
4067                                 &tp->link_config.advertising,
4068                                 phydev->advertising);
4069
4070                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4071                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4072                                          advertising);
4073                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4074                                          advertising);
4075                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4076                                          advertising);
4077
4078                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4079                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4080                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4081                                                          advertising);
4082                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4083                                                          advertising);
4084                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4085                                                          advertising);
4086                                 } else {
4087                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4088                                                          advertising);
4089                                 }
4090                         }
4091
4092                         linkmode_copy(phydev->advertising, advertising);
4093                         phy_start_aneg(phydev);
4094
4095                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4096                         if (phyid != PHY_ID_BCMAC131) {
4097                                 phyid &= PHY_BCM_OUI_MASK;
4098                                 if (phyid == PHY_BCM_OUI_1 ||
4099                                     phyid == PHY_BCM_OUI_2 ||
4100                                     phyid == PHY_BCM_OUI_3)
4101                                         do_low_power = true;
4102                         }
4103                 }
4104         } else {
4105                 do_low_power = true;
4106
4107                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4108                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4109
4110                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4111                         tg3_setup_phy(tp, false);
4112         }
4113
4114         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4115                 u32 val;
4116
4117                 val = tr32(GRC_VCPU_EXT_CTRL);
4118                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4119         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4120                 int i;
4121                 u32 val;
4122
4123                 for (i = 0; i < 200; i++) {
4124                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4125                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4126                                 break;
4127                         msleep(1);
4128                 }
4129         }
4130         if (tg3_flag(tp, WOL_CAP))
4131                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4132                                                      WOL_DRV_STATE_SHUTDOWN |
4133                                                      WOL_DRV_WOL |
4134                                                      WOL_SET_MAGIC_PKT);
4135
4136         if (device_should_wake) {
4137                 u32 mac_mode;
4138
4139                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4140                         if (do_low_power &&
4141                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4142                                 tg3_phy_auxctl_write(tp,
4143                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4144                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4145                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4146                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4147                                 udelay(40);
4148                         }
4149
4150                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4151                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4152                         else if (tp->phy_flags &
4153                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4154                                 if (tp->link_config.active_speed == SPEED_1000)
4155                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4156                                 else
4157                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4158                         } else
4159                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4160
4161                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4162                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4163                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4164                                              SPEED_100 : SPEED_10;
4165                                 if (tg3_5700_link_polarity(tp, speed))
4166                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4167                                 else
4168                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4169                         }
4170                 } else {
4171                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4172                 }
4173
4174                 if (!tg3_flag(tp, 5750_PLUS))
4175                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4176
4177                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4178                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4179                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4180                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4181
4182                 if (tg3_flag(tp, ENABLE_APE))
4183                         mac_mode |= MAC_MODE_APE_TX_EN |
4184                                     MAC_MODE_APE_RX_EN |
4185                                     MAC_MODE_TDE_ENABLE;
4186
4187                 tw32_f(MAC_MODE, mac_mode);
4188                 udelay(100);
4189
4190                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4191                 udelay(10);
4192         }
4193
4194         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4195             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4196              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4197                 u32 base_val;
4198
4199                 base_val = tp->pci_clock_ctrl;
4200                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4201                              CLOCK_CTRL_TXCLK_DISABLE);
4202
4203                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4204                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4205         } else if (tg3_flag(tp, 5780_CLASS) ||
4206                    tg3_flag(tp, CPMU_PRESENT) ||
4207                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4208                 /* do nothing */
4209         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4210                 u32 newbits1, newbits2;
4211
4212                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4214                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4215                                     CLOCK_CTRL_TXCLK_DISABLE |
4216                                     CLOCK_CTRL_ALTCLK);
4217                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4218                 } else if (tg3_flag(tp, 5705_PLUS)) {
4219                         newbits1 = CLOCK_CTRL_625_CORE;
4220                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4221                 } else {
4222                         newbits1 = CLOCK_CTRL_ALTCLK;
4223                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4224                 }
4225
4226                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4227                             40);
4228
4229                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4230                             40);
4231
4232                 if (!tg3_flag(tp, 5705_PLUS)) {
4233                         u32 newbits3;
4234
4235                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4236                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4237                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4238                                             CLOCK_CTRL_TXCLK_DISABLE |
4239                                             CLOCK_CTRL_44MHZ_CORE);
4240                         } else {
4241                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4242                         }
4243
4244                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4245                                     tp->pci_clock_ctrl | newbits3, 40);
4246                 }
4247         }
4248
4249         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4250                 tg3_power_down_phy(tp, do_low_power);
4251
4252         tg3_frob_aux_power(tp, true);
4253
4254         /* Workaround for unstable PLL clock */
4255         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4256             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4257              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4258                 u32 val = tr32(0x7d00);
4259
4260                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4261                 tw32(0x7d00, val);
4262                 if (!tg3_flag(tp, ENABLE_ASF)) {
4263                         int err;
4264
4265                         err = tg3_nvram_lock(tp);
4266                         tg3_halt_cpu(tp, RX_CPU_BASE);
4267                         if (!err)
4268                                 tg3_nvram_unlock(tp);
4269                 }
4270         }
4271
4272         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4273
4274         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4275
4276         return 0;
4277 }
4278
4279 static void tg3_power_down(struct tg3 *tp)
4280 {
4281         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4282         pci_set_power_state(tp->pdev, PCI_D3hot);
4283 }
4284
4285 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4286 {
4287         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4288         case MII_TG3_AUX_STAT_10HALF:
4289                 *speed = SPEED_10;
4290                 *duplex = DUPLEX_HALF;
4291                 break;
4292
4293         case MII_TG3_AUX_STAT_10FULL:
4294                 *speed = SPEED_10;
4295                 *duplex = DUPLEX_FULL;
4296                 break;
4297
4298         case MII_TG3_AUX_STAT_100HALF:
4299                 *speed = SPEED_100;
4300                 *duplex = DUPLEX_HALF;
4301                 break;
4302
4303         case MII_TG3_AUX_STAT_100FULL:
4304                 *speed = SPEED_100;
4305                 *duplex = DUPLEX_FULL;
4306                 break;
4307
4308         case MII_TG3_AUX_STAT_1000HALF:
4309                 *speed = SPEED_1000;
4310                 *duplex = DUPLEX_HALF;
4311                 break;
4312
4313         case MII_TG3_AUX_STAT_1000FULL:
4314                 *speed = SPEED_1000;
4315                 *duplex = DUPLEX_FULL;
4316                 break;
4317
4318         default:
4319                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4320                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4321                                  SPEED_10;
4322                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4323                                   DUPLEX_HALF;
4324                         break;
4325                 }
4326                 *speed = SPEED_UNKNOWN;
4327                 *duplex = DUPLEX_UNKNOWN;
4328                 break;
4329         }
4330 }
4331
/* Program the PHY autonegotiation advertisement registers for the
 * ethtool-encoded link modes in @advertise and the FLOW_CTRL_* bits in
 * @flowctrl: MII_ADVERTISE always, MII_CTRL1000 on gigabit-capable
 * PHYs, and the EEE advertisement on EEE-capable PHYs.
 *
 * Returns 0 on success or the negative error code of the first failing
 * PHY access.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* Build the 10/100 advertisement word: CSMA bit plus the
	 * requested modes and pause capability bits.
	 */
	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 additionally force manual master mode here
		 * (presumably a chip-rev workaround — see the matching
		 * check in tg3_phy_copper_an_config_ok()).
		 */
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	/* Everything below configures EEE; skip it on non-EEE PHYs. */
	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while we reprogram the EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		/* If EEE is administratively disabled, advertise nothing
		 * and record that in the cached state.
		 */
		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		/* Per-ASIC DSP tweaks that accompany the EEE setting. */
		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always restore SMDSP; keep the first error we saw. */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4415
/* Bring up the copper PHY link: either (re)start autonegotiation with
 * the appropriate advertisement, or force the configured speed/duplex
 * when autonegotiation is disabled.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Low-power (WOL) entry: advertise only the
			 * rates the wake logic is set up for.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			/* Normal autoneg: use the administratively
			 * configured advertisement and flow control.
			 */
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		/* Forced mode: the active settings are exactly what was
		 * configured.
		 */
		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Translate the configured speed/duplex into BMCR bits. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback mode, then
			 * poll BMSR (read twice — link status is a
			 * latched bit) for up to ~15 ms until it drops,
			 * before applying the new forced settings.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4512
/* Reconstruct tp->link_config (autoneg mode, forced speed/duplex,
 * advertised modes, flow control) from the PHY's current register
 * state — used to adopt a configuration already programmed by firmware.
 *
 * Returns 0 on success, a negative error from a failed PHY read, or
 * -EIO when the forced-mode BMCR encoding is not usable on this PHY.
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Autoneg disabled: decode forced speed/duplex from BMCR. */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		/* Default to failure; cleared once a valid combination
		 * is decoded below.
		 */
		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			/* Serdes PHYs cannot run forced 10 Mb/s. */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			/* Serdes PHYs cannot run forced 100 Mb/s. */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	/* Autoneg enabled: rebuild the advertisement mask. */
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		/* Copper: 10/100 modes and pause bits from MII_ADVERTISE. */
		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper gigabit modes live in MII_CTRL1000. */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* Serdes: 1000X advertisement carries both the
			 * pause bits and the gigabit modes.
			 */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4609
4610 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4611 {
4612         int err;
4613
4614         /* Turn off tap power management. */
4615         /* Set Extended packet length bit */
4616         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4617
4618         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4619         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4620         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4621         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4622         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4623
4624         udelay(40);
4625
4626         return err;
4627 }
4628
4629 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4630 {
4631         struct ethtool_eee eee;
4632
4633         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4634                 return true;
4635
4636         tg3_eee_pull_config(tp, &eee);
4637
4638         if (tp->eee.eee_enabled) {
4639                 if (tp->eee.advertised != eee.advertised ||
4640                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4641                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4642                         return false;
4643         } else {
4644                 /* EEE is disabled but we're advertising */
4645                 if (eee.advertised)
4646                         return false;
4647         }
4648
4649         return true;
4650 }
4651
4652 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4653 {
4654         u32 advmsk, tgtadv, advertising;
4655
4656         advertising = tp->link_config.advertising;
4657         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4658
4659         advmsk = ADVERTISE_ALL;
4660         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4661                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4662                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4663         }
4664
4665         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4666                 return false;
4667
4668         if ((*lcladv & advmsk) != tgtadv)
4669                 return false;
4670
4671         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4672                 u32 tg3_ctrl;
4673
4674                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4675
4676                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4677                         return false;
4678
4679                 if (tgtadv &&
4680                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4681                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4682                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4683                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4684                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4685                 } else {
4686                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4687                 }
4688
4689                 if (tg3_ctrl != tgtadv)
4690                         return false;
4691         }
4692
4693         return true;
4694 }
4695
4696 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4697 {
4698         u32 lpeth = 0;
4699
4700         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4701                 u32 val;
4702
4703                 if (tg3_readphy(tp, MII_STAT1000, &val))
4704                         return false;
4705
4706                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4707         }
4708
4709         if (tg3_readphy(tp, MII_LPA, rmtadv))
4710                 return false;
4711
4712         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4713         tp->link_config.rmt_adv = lpeth;
4714
4715         return true;
4716 }
4717
4718 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4719 {
4720         if (curr_link_up != tp->link_up) {
4721                 if (curr_link_up) {
4722                         netif_carrier_on(tp->dev);
4723                 } else {
4724                         netif_carrier_off(tp->dev);
4725                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4726                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4727                 }
4728
4729                 tg3_link_report(tp);
4730                 return true;
4731         }
4732
4733         return false;
4734 }
4735
4736 static void tg3_clear_mac_status(struct tg3 *tp)
4737 {
4738         tw32(MAC_EVENT, 0);
4739
4740         tw32_f(MAC_STATUS,
4741                MAC_STATUS_SYNC_CHANGED |
4742                MAC_STATUS_CFG_CHANGED |
4743                MAC_STATUS_MI_COMPLETION |
4744                MAC_STATUS_LNKSTATE_CHANGED);
4745         udelay(40);
4746 }
4747
/* Program the CPMU EEE (Energy Efficient Ethernet) link-idle, control,
 * mode, and debounce-timer registers from the settings cached in
 * tp->eee.  All writes are posted-flushed (tw32_f).
 */
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	/* Link-idle detection sources; 57765 A0 needs the extra
	 * APE-TX-empty bit set as well.
	 */
	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	/* Build the EEE mode word; LPI-in-TX only when tx_lpi is
	 * administratively enabled.
	 */
	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	/* Write 0 (everything off) when EEE is disabled overall. */
	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	/* Debounce timer 1: PCIe-exit debounce plus the 16-bit
	 * tx LPI timer value.
	 */
	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
4783
4784 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4785 {
4786         bool current_link_up;
4787         u32 bmsr, val;
4788         u32 lcl_adv, rmt_adv;
4789         u16 current_speed;
4790         u8 current_duplex;
4791         int i, err;
4792
4793         tg3_clear_mac_status(tp);
4794
4795         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4796                 tw32_f(MAC_MI_MODE,
4797                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4798                 udelay(80);
4799         }
4800
4801         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4802
4803         /* Some third-party PHYs need to be reset on link going
4804          * down.
4805          */
4806         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4807              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4808              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4809             tp->link_up) {
4810                 tg3_readphy(tp, MII_BMSR, &bmsr);
4811                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4812                     !(bmsr & BMSR_LSTATUS))
4813                         force_reset = true;
4814         }
4815         if (force_reset)
4816                 tg3_phy_reset(tp);
4817
4818         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4819                 tg3_readphy(tp, MII_BMSR, &bmsr);
4820                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4821                     !tg3_flag(tp, INIT_COMPLETE))
4822                         bmsr = 0;
4823
4824                 if (!(bmsr & BMSR_LSTATUS)) {
4825                         err = tg3_init_5401phy_dsp(tp);
4826                         if (err)
4827                                 return err;
4828
4829                         tg3_readphy(tp, MII_BMSR, &bmsr);
4830                         for (i = 0; i < 1000; i++) {
4831                                 udelay(10);
4832                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4833                                     (bmsr & BMSR_LSTATUS)) {
4834                                         udelay(40);
4835                                         break;
4836                                 }
4837                         }
4838
4839                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4840                             TG3_PHY_REV_BCM5401_B0 &&
4841                             !(bmsr & BMSR_LSTATUS) &&
4842                             tp->link_config.active_speed == SPEED_1000) {
4843                                 err = tg3_phy_reset(tp);
4844                                 if (!err)
4845                                         err = tg3_init_5401phy_dsp(tp);
4846                                 if (err)
4847                                         return err;
4848                         }
4849                 }
4850         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4851                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4852                 /* 5701 {A0,B0} CRC bug workaround */
4853                 tg3_writephy(tp, 0x15, 0x0a75);
4854                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4855                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4856                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4857         }
4858
4859         /* Clear pending interrupts... */
4860         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4861         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4862
4863         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4864                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4865         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4866                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4867
4868         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4869             tg3_asic_rev(tp) == ASIC_REV_5701) {
4870                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4871                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4872                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4873                 else
4874                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4875         }
4876
4877         current_link_up = false;
4878         current_speed = SPEED_UNKNOWN;
4879         current_duplex = DUPLEX_UNKNOWN;
4880         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4881         tp->link_config.rmt_adv = 0;
4882
4883         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4884                 err = tg3_phy_auxctl_read(tp,
4885                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4886                                           &val);
4887                 if (!err && !(val & (1 << 10))) {
4888                         tg3_phy_auxctl_write(tp,
4889                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4890                                              val | (1 << 10));
4891                         goto relink;
4892                 }
4893         }
4894
4895         bmsr = 0;
4896         for (i = 0; i < 100; i++) {
4897                 tg3_readphy(tp, MII_BMSR, &bmsr);
4898                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4899                     (bmsr & BMSR_LSTATUS))
4900                         break;
4901                 udelay(40);
4902         }
4903
4904         if (bmsr & BMSR_LSTATUS) {
4905                 u32 aux_stat, bmcr;
4906
4907                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4908                 for (i = 0; i < 2000; i++) {
4909                         udelay(10);
4910                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4911                             aux_stat)
4912                                 break;
4913                 }
4914
4915                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4916                                              &current_speed,
4917                                              &current_duplex);
4918
4919                 bmcr = 0;
4920                 for (i = 0; i < 200; i++) {
4921                         tg3_readphy(tp, MII_BMCR, &bmcr);
4922                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4923                                 continue;
4924                         if (bmcr && bmcr != 0x7fff)
4925                                 break;
4926                         udelay(10);
4927                 }
4928
4929                 lcl_adv = 0;
4930                 rmt_adv = 0;
4931
4932                 tp->link_config.active_speed = current_speed;
4933                 tp->link_config.active_duplex = current_duplex;
4934
4935                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4936                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4937
4938                         if ((bmcr & BMCR_ANENABLE) &&
4939                             eee_config_ok &&
4940                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4941                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4942                                 current_link_up = true;
4943
4944                         /* EEE settings changes take effect only after a phy
4945                          * reset.  If we have skipped a reset due to Link Flap
4946                          * Avoidance being enabled, do it now.
4947                          */
4948                         if (!eee_config_ok &&
4949                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4950                             !force_reset) {
4951                                 tg3_setup_eee(tp);
4952                                 tg3_phy_reset(tp);
4953                         }
4954                 } else {
4955                         if (!(bmcr & BMCR_ANENABLE) &&
4956                             tp->link_config.speed == current_speed &&
4957                             tp->link_config.duplex == current_duplex) {
4958                                 current_link_up = true;
4959                         }
4960                 }
4961
4962                 if (current_link_up &&
4963                     tp->link_config.active_duplex == DUPLEX_FULL) {
4964                         u32 reg, bit;
4965
4966                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4967                                 reg = MII_TG3_FET_GEN_STAT;
4968                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4969                         } else {
4970                                 reg = MII_TG3_EXT_STAT;
4971                                 bit = MII_TG3_EXT_STAT_MDIX;
4972                         }
4973
4974                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4975                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4976
4977                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4978                 }
4979         }
4980
4981 relink:
4982         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4983                 tg3_phy_copper_begin(tp);
4984
4985                 if (tg3_flag(tp, ROBOSWITCH)) {
4986                         current_link_up = true;
4987                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4988                         current_speed = SPEED_1000;
4989                         current_duplex = DUPLEX_FULL;
4990                         tp->link_config.active_speed = current_speed;
4991                         tp->link_config.active_duplex = current_duplex;
4992                 }
4993
4994                 tg3_readphy(tp, MII_BMSR, &bmsr);
4995                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4996                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4997                         current_link_up = true;
4998         }
4999
5000         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5001         if (current_link_up) {
5002                 if (tp->link_config.active_speed == SPEED_100 ||
5003                     tp->link_config.active_speed == SPEED_10)
5004                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5005                 else
5006                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5007         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5008                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5009         else
5010                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5011
5012         /* In order for the 5750 core in BCM4785 chip to work properly
5013          * in RGMII mode, the Led Control Register must be set up.
5014          */
5015         if (tg3_flag(tp, RGMII_MODE)) {
5016                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5017                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5018
5019                 if (tp->link_config.active_speed == SPEED_10)
5020                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5021                 else if (tp->link_config.active_speed == SPEED_100)
5022                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5023                                      LED_CTRL_100MBPS_ON);
5024                 else if (tp->link_config.active_speed == SPEED_1000)
5025                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5026                                      LED_CTRL_1000MBPS_ON);
5027
5028                 tw32(MAC_LED_CTRL, led_ctrl);
5029                 udelay(40);
5030         }
5031
5032         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5033         if (tp->link_config.active_duplex == DUPLEX_HALF)
5034                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5035
5036         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5037                 if (current_link_up &&
5038                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5039                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5040                 else
5041                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5042         }
5043
5044         /* ??? Without this setting Netgear GA302T PHY does not
5045          * ??? send/receive packets...
5046          */
5047         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5048             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5049                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5050                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5051                 udelay(80);
5052         }
5053
5054         tw32_f(MAC_MODE, tp->mac_mode);
5055         udelay(40);
5056
5057         tg3_phy_eee_adjust(tp, current_link_up);
5058
5059         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5060                 /* Polled via timer. */
5061                 tw32_f(MAC_EVENT, 0);
5062         } else {
5063                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5064         }
5065         udelay(40);
5066
5067         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5068             current_link_up &&
5069             tp->link_config.active_speed == SPEED_1000 &&
5070             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5071                 udelay(120);
5072                 tw32_f(MAC_STATUS,
5073                      (MAC_STATUS_SYNC_CHANGED |
5074                       MAC_STATUS_CFG_CHANGED));
5075                 udelay(40);
5076                 tg3_write_mem(tp,
5077                               NIC_SRAM_FIRMWARE_MBOX,
5078                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5079         }
5080
5081         /* Prevent send BD corruption. */
5082         if (tg3_flag(tp, CLKREQ_BUG)) {
5083                 if (tp->link_config.active_speed == SPEED_100 ||
5084                     tp->link_config.active_speed == SPEED_10)
5085                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5086                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5087                 else
5088                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5089                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5090         }
5091
5092         tg3_test_and_report_link_chg(tp, current_link_up);
5093
5094         return 0;
5095 }
5096
/* Software state for the 1000BASE-X fiber auto-negotiation state machine
 * (tg3_fiber_aneg_smachine).  One instance lives on the stack of
 * fiber_autoneg() and is advanced once per polling tick.
 */
struct tg3_fiber_aneginfo {
        int state;      /* current ANEG_STATE_* of the arbitration machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;      /* MR_* control bits in, result/LP-ability bits out */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Tick counters (incremented once per state-machine invocation,
         * not wall-clock time); their difference is compared against
         * ANEG_STATE_SETTLE_TIME.
         */
        unsigned long link_time, cur_time;

        /* Last config word seen and how many consecutive ticks it has
         * been stable; drives ability_match below.
         */
        u32 ability_match_cfg;
        int ability_match_count;

        /* Match flags derived each tick from MAC_STATUS/MAC_RX_AUTO_NEG. */
        char ability_match, idle_match, ack_match;

        /* Clause-37 config words: what we transmit and what we received. */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
5160
/* Advance the software 1000BASE-X auto-negotiation state machine by one
 * tick.  Samples the received config word from the MAC, updates the match
 * flags in @ap, then runs one transition of the arbitration machine
 * (modeled on the IEEE 802.3 clause 37 state diagram — TODO confirm exact
 * correspondence).
 *
 * @tp: device state (used for MMIO register access)
 * @ap: caller-owned negotiation state; reset when state == ANEG_STATE_UNKNOWN
 *
 * Returns ANEG_OK (keep polling), ANEG_TIMER_ENAB (keep polling, settle
 * timer running), ANEG_DONE (negotiation finished) or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First invocation: zero all sampling/match state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                /* ability_match is set once the same non-idle config word
                 * has been observed on more than one consecutive tick.
                 */
                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config word being received: link partner is idle. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fall through */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        /* Restart negotiation from scratch. */
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                /* Transmit the idle (all-zero) config word. */
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fall through */
        case ANEG_STATE_RESTART:
                /* Wait for the settle time before probing abilities. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise full duplex plus our pause capabilities. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait until a stable, non-idle word arrives from the LP. */
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the link partner's config word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fall through */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* The word being acked must be the one we matched;
                         * otherwise restart negotiation.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        /* Partner went back to idle: restart. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        /* Reserved bits set in the received word. */
                        ret = ANEG_FAILED;
                        break;
                }
                /* Translate the partner's config word into MR_LP_ADV_* bits. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                /* 0x0008 is the received toggle bit position — TODO confirm
                 * against the clause 37 next-page format.
                 */
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is not implemented;
                                 * fail unless neither side wants one.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                ap->link_time = ap->cur_time;
                /* Stop transmitting config words; send idle. */
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
5412
/* Run a full software auto-negotiation cycle on the fiber link by polling
 * tg3_fiber_aneg_smachine() once per microsecond until it reports DONE or
 * FAILED (bounded at ~195 ms of polling).
 *
 * @tp:      device state
 * @txflags: out — final transmitted config word (aninfo.txconfig)
 * @rxflags: out — final MR_* flags (aninfo.flags)
 *
 * Returns 1 if negotiation completed with link OK and a full-duplex
 * partner, 0 otherwise.  Clears MAC_MODE_SEND_CONFIGS on exit.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
        int res = 0;
        struct tg3_fiber_aneginfo aninfo;
        int status = ANEG_FAILED;
        unsigned int tick;
        u32 tmp;

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Force GMII port mode while negotiating. */
        tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
        tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
        udelay(40);

        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
        udelay(40);

        memset(&aninfo, 0, sizeof(aninfo));
        aninfo.flags |= MR_AN_ENABLE;
        aninfo.state = ANEG_STATE_UNKNOWN;
        aninfo.cur_time = 0;
        tick = 0;
        /* Poll the state machine; each iteration is one "tick". */
        while (++tick < 195000) {
                status = tg3_fiber_aneg_smachine(tp, &aninfo);
                if (status == ANEG_DONE || status == ANEG_FAILED)
                        break;

                udelay(1);
        }

        /* Stop sending config words regardless of outcome. */
        tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        *txflags = aninfo.txconfig;
        *rxflags = aninfo.flags;

        if (status == ANEG_DONE &&
            (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
                             MR_LP_ADV_FULL_DUPLEX)))
                res = 1;

        return res;
}
5457
/* Bring up the BCM8002 SerDes PHY with its documented-by-magic register
 * sequence.  The individual register values (0x10/0x11/0x13/0x16/0x18
 * writes) come from vendor init code; their bit-level meaning is not
 * visible here, so the ordering and delays must be preserved verbatim.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
5507
/* Configure the fiber link using the hardware (SG_DIG) auto-negotiation
 * engine and report whether the link came up.
 *
 * @tp:         device state
 * @mac_status: caller's snapshot of MAC_STATUS (re-read internally where
 *              fresher values are needed)
 *
 * Handles three paths: forced mode (autoneg disabled), (re)starting the
 * hardware negotiator when SG_DIG_CTRL does not match the desired
 * advertisement, and evaluating a completed/failed negotiation including
 * parallel detection.  Returns true if the link is up.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        bool current_link_up;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = false;

        /* SerDes-config workaround applies to everything except the two
         * earliest 5704 revisions — presumably an erratum on later chips;
         * TODO confirm against the errata list.
         */
        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: tear down HW autoneg if it was on. */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* Per-port magic pre-emphasis values. */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = true;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* If we are in parallel-detect grace period and still have
                 * PCS sync without config words, keep the link up and let
                 * the counter run down before restarting autoneg.
                 */
                if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = true;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                /* Pulse soft reset while programming the advertisement. */
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Negotiation succeeded: derive pause settings. */
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tp->link_config.rmt_adv =
                                           mii_adv_to_ethtool_adv_x(remote_adv);

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = true;
                        tp->serdes_counter = 0;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        /* Not complete yet: burn down the AN timeout, then
                         * fall back to parallel detection.
                         */
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = true;
                                        tp->phy_flags |=
                                                TG3_PHYFLG_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync / no signal: rearm the AN timeout. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
5652
/* Resolve fiber link state using software ("by hand") autonegotiation.
 * Returns true if the link is up.
 */
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        bool current_link_up = false;

        /* Without PCS sync there is no usable signal; report link down. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        u32 local_adv = 0, remote_adv = 0;

                        /* Translate the software-autoneg pause bits into
                         * MII 1000BASE-X advertisement form so the common
                         * flow-control resolver can be used.
                         */
                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tp->link_config.rmt_adv =
                                           mii_adv_to_ethtool_adv_x(remote_adv);

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = true;
                }
                /* Ack the latched sync/config change bits, polling up to
                 * 30 iterations (20 + 40 usec each) for them to clear.
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                mac_status = tr32(MAC_STATUS);
                /* Autoneg did not complete, but we are synced and the peer
                 * is not sending config code words: link is up via
                 * parallel detection.
                 */
                if (!current_link_up &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = true;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = true;

                /* Pulse SEND_CONFIGS, then restore normal MAC mode. */
                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
5717
/* Bring up / re-check the link on TBI (fiber) devices.  Always returns 0;
 * link-state changes are reported via tg3_test_and_report_link_chg() /
 * tg3_link_report().
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        bool current_link_up;
        int i;

        /* Snapshot current settings so we only log a report on change. */
        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Fast path for software autoneg: if the link is already up and
         * stable (synced, signal present, no incoming config words), just
         * ack the change bits and keep the current state.
         */
        if (!tg3_flag(tp, HW_AUTONEG) &&
            tp->link_up &&
            tg3_flag(tp, INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Put the MAC into TBI port mode before (re)negotiating. */
        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == TG3_PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = false;
        tp->link_config.rmt_adv = 0;
        mac_status = tr32(MAC_STATUS);

        if (tg3_flag(tp, HW_AUTONEG))
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        /* Clear the stale link-change flag in the status block so the
         * next interrupt reflects only new events.
         */
        tp->napi[0].hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack latched status-change bits; poll up to 100 * 5 usec. */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = false;
                /* Autoneg timed out with no sync: pulse SEND_CONFIGS to
                 * nudge the link partner into renegotiating.
                 */
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        if (current_link_up) {
                /* TBI fiber links always run at 1000/full. */
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_UNKNOWN;
                tp->link_config.active_duplex = DUPLEX_UNKNOWN;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        /* If the up/down state did not flip, still report any change in
         * pause/speed/duplex parameters.
         */
        if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
                u32 now_pause_cfg = tp->link_config.active_flowctrl;
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
5820
5821 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5822 {
5823         int err = 0;
5824         u32 bmsr, bmcr;
5825         u16 current_speed = SPEED_UNKNOWN;
5826         u8 current_duplex = DUPLEX_UNKNOWN;
5827         bool current_link_up = false;
5828         u32 local_adv, remote_adv, sgsr;
5829
5830         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5831              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5832              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5833              (sgsr & SERDES_TG3_SGMII_MODE)) {
5834
5835                 if (force_reset)
5836                         tg3_phy_reset(tp);
5837
5838                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5839
5840                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5841                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5842                 } else {
5843                         current_link_up = true;
5844                         if (sgsr & SERDES_TG3_SPEED_1000) {
5845                                 current_speed = SPEED_1000;
5846                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5847                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5848                                 current_speed = SPEED_100;
5849                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5850                         } else {
5851                                 current_speed = SPEED_10;
5852                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5853                         }
5854
5855                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5856                                 current_duplex = DUPLEX_FULL;
5857                         else
5858                                 current_duplex = DUPLEX_HALF;
5859                 }
5860
5861                 tw32_f(MAC_MODE, tp->mac_mode);
5862                 udelay(40);
5863
5864                 tg3_clear_mac_status(tp);
5865
5866                 goto fiber_setup_done;
5867         }
5868
5869         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5870         tw32_f(MAC_MODE, tp->mac_mode);
5871         udelay(40);
5872
5873         tg3_clear_mac_status(tp);
5874
5875         if (force_reset)
5876                 tg3_phy_reset(tp);
5877
5878         tp->link_config.rmt_adv = 0;
5879
5880         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5881         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5882         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5883                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5884                         bmsr |= BMSR_LSTATUS;
5885                 else
5886                         bmsr &= ~BMSR_LSTATUS;
5887         }
5888
5889         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5890
5891         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5892             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5893                 /* do nothing, just check for link up at the end */
5894         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5895                 u32 adv, newadv;
5896
5897                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5898                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5899                                  ADVERTISE_1000XPAUSE |
5900                                  ADVERTISE_1000XPSE_ASYM |
5901                                  ADVERTISE_SLCT);
5902
5903                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5904                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5905
5906                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5907                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5908                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5909                         tg3_writephy(tp, MII_BMCR, bmcr);
5910
5911                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5912                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5913                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5914
5915                         return err;
5916                 }
5917         } else {
5918                 u32 new_bmcr;
5919
5920                 bmcr &= ~BMCR_SPEED1000;
5921                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5922
5923                 if (tp->link_config.duplex == DUPLEX_FULL)
5924                         new_bmcr |= BMCR_FULLDPLX;
5925
5926                 if (new_bmcr != bmcr) {
5927                         /* BMCR_SPEED1000 is a reserved bit that needs
5928                          * to be set on write.
5929                          */
5930                         new_bmcr |= BMCR_SPEED1000;
5931
5932                         /* Force a linkdown */
5933                         if (tp->link_up) {
5934                                 u32 adv;
5935
5936                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5937                                 adv &= ~(ADVERTISE_1000XFULL |
5938                                          ADVERTISE_1000XHALF |
5939                                          ADVERTISE_SLCT);
5940                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5941                                 tg3_writephy(tp, MII_BMCR, bmcr |
5942                                                            BMCR_ANRESTART |
5943                                                            BMCR_ANENABLE);
5944                                 udelay(10);
5945                                 tg3_carrier_off(tp);
5946                         }
5947                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5948                         bmcr = new_bmcr;
5949                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5950                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5951                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5952                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5953                                         bmsr |= BMSR_LSTATUS;
5954                                 else
5955                                         bmsr &= ~BMSR_LSTATUS;
5956                         }
5957                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5958                 }
5959         }
5960
5961         if (bmsr & BMSR_LSTATUS) {
5962                 current_speed = SPEED_1000;
5963                 current_link_up = true;
5964                 if (bmcr & BMCR_FULLDPLX)
5965                         current_duplex = DUPLEX_FULL;
5966                 else
5967                         current_duplex = DUPLEX_HALF;
5968
5969                 local_adv = 0;
5970                 remote_adv = 0;
5971
5972                 if (bmcr & BMCR_ANENABLE) {
5973                         u32 common;
5974
5975                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5976                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5977                         common = local_adv & remote_adv;
5978                         if (common & (ADVERTISE_1000XHALF |
5979                                       ADVERTISE_1000XFULL)) {
5980                                 if (common & ADVERTISE_1000XFULL)
5981                                         current_duplex = DUPLEX_FULL;
5982                                 else
5983                                         current_duplex = DUPLEX_HALF;
5984
5985                                 tp->link_config.rmt_adv =
5986                                            mii_adv_to_ethtool_adv_x(remote_adv);
5987                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5988                                 /* Link is up via parallel detect */
5989                         } else {
5990                                 current_link_up = false;
5991                         }
5992                 }
5993         }
5994
5995 fiber_setup_done:
5996         if (current_link_up && current_duplex == DUPLEX_FULL)
5997                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5998
5999         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6000         if (tp->link_config.active_duplex == DUPLEX_HALF)
6001                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
6002
6003         tw32_f(MAC_MODE, tp->mac_mode);
6004         udelay(40);
6005
6006         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6007
6008         tp->link_config.active_speed = current_speed;
6009         tp->link_config.active_duplex = current_duplex;
6010
6011         tg3_test_and_report_link_chg(tp, current_link_up);
6012         return err;
6013 }
6014
/* Timer-driven helper for serdes parts: once the autoneg grace period
 * (tp->serdes_counter) expires, fall back to parallel detection when the
 * partner is not sending config code words, and re-enable autoneg as
 * soon as config words reappear.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
        if (tp->serdes_counter) {
                /* Give autoneg time to complete. */
                tp->serdes_counter--;
                return;
        }

        if (!tp->link_up &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 bmcr;

                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Select shadow register 0x1f */
                        tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
                        tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

                        /* Select expansion interrupt status register */
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                                         MII_TG3_DSP_EXP1_INT_STAT);
                        /* Double read: presumably the first read clears
                         * stale latched bits — confirm with PHY docs.
                         */
                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

                        if ((phy1 & 0x10) && !(phy2 & 0x20)) {
                                /* We have signal detect and not receiving
                                 * config code words, link is up by parallel
                                 * detection.
                                 */

                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                tg3_writephy(tp, MII_BMCR, bmcr);
                                tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
                        }
                }
        } else if (tp->link_up &&
                   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
                   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Select expansion interrupt status register */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                                 MII_TG3_DSP_EXP1_INT_STAT);
                tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        /* Config code words received, turn on autoneg. */
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

                }
        }
}
6074
/* Top-level link setup: dispatch to the PHY-type specific handler, then
 * update MAC/host settings that depend on the resulting link state.
 * Returns the error status from the PHY-specific handler.
 */
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
        u32 val;
        int err;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                err = tg3_setup_fiber_phy(tp, force_reset);
        else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                err = tg3_setup_fiber_mii_phy(tp, force_reset);
        else
                err = tg3_setup_copper_phy(tp, force_reset);

        if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
                u32 scale;

                /* Rescale the GRC timer prescaler to match the current
                 * MAC core clock rate.
                 */
                val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
                if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
                        scale = 65;
                else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
                        scale = 6;
                else
                        scale = 12;

                val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
                val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
                tw32(GRC_MISC_CFG, val);
        }

        val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
              (6 << TX_LENGTHS_IPG_SHIFT);
        if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                /* Preserve fields 5720/5762 keep in MAC_TX_LENGTHS. */
                val |= tr32(MAC_TX_LENGTHS) &
                       (TX_LENGTHS_JMB_FRM_LEN_MSK |
                        TX_LENGTHS_CNT_DWN_VAL_MSK);

        /* Gigabit half-duplex uses the larger slot time (0xff vs 32). */
        if (tp->link_config.active_speed == SPEED_1000 &&
            tp->link_config.active_duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS, val |
                     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
        else
                tw32(MAC_TX_LENGTHS, val |
                     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

        if (!tg3_flag(tp, 5705_PLUS)) {
                /* Coalesce statistics only while the link is up. */
                if (tp->link_up) {
                        tw32(HOSTCC_STAT_COAL_TICKS,
                             tp->coal.stats_block_coalesce_usecs);
                } else {
                        tw32(HOSTCC_STAT_COAL_TICKS, 0);
                }
        }

        if (tg3_flag(tp, ASPM_WORKAROUND)) {
                /* Adjust the PCIe power-management L1 threshold based on
                 * link state (workaround chips only).
                 */
                val = tr32(PCIE_PWR_MGMT_THRESH);
                if (!tp->link_up)
                        val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
                              tp->pwrmgmt_thresh;
                else
                        val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
                tw32(PCIE_PWR_MGMT_THRESH, val);
        }

        return err;
}
6140
/* tp->lock must be held */
/* Read the 64-bit EAV reference clock.  The LSB is read first
 * (presumably latching the MSB — confirm with chip docs) and is
 * bracketed by PTP system timestamps so gettimex64 can correlate device
 * time with system time.
 */
static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
{
        u64 stamp;

        ptp_read_system_prets(sts);
        stamp = tr32(TG3_EAV_REF_CLCK_LSB);
        ptp_read_system_postts(sts);
        stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;

        return stamp;
}
6153
/* tp->lock must be held */
/* Load the 64-bit EAV reference clock with "newval": stop the clock,
 * write both halves, then resume.  The final write uses tw32_f so it is
 * flushed before returning.
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
        u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

        tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
        tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
        tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
        tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
6164
6165 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6166 static inline void tg3_full_unlock(struct tg3 *tp);
6167 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6168 {
6169         struct tg3 *tp = netdev_priv(dev);
6170
6171         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6172                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6173                                 SOF_TIMESTAMPING_SOFTWARE;
6174
6175         if (tg3_flag(tp, PTP_CAPABLE)) {
6176                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6177                                         SOF_TIMESTAMPING_RX_HARDWARE |
6178                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6179         }
6180
6181         if (tp->ptp_clock)
6182                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6183         else
6184                 info->phc_index = -1;
6185
6186         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6187
6188         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6189                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6190                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6191                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6192         return 0;
6193 }
6194
/* ptp_clock_info::adjfreq callback: program the hardware frequency
 * correction, given in parts per billion.
 */
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
        struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
        bool neg_adj = false;
        u32 correction = 0;

        /* Hardware takes a magnitude plus a direction bit. */
        if (ppb < 0) {
                neg_adj = true;
                ppb = -ppb;
        }

        /* Frequency adjustment is performed using hardware with a 24 bit
         * accumulator and a programmable correction value. On each clk, the
         * correction value gets added to the accumulator and when it
         * overflows, the time counter is incremented/decremented.
         *
         * So conversion from ppb to correction value is
         *              ppb * (1 << 24) / 1000000000
         */
        correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
                     TG3_EAV_REF_CLK_CORRECT_MASK;

        tg3_full_lock(tp, 0);

        /* A zero correction disables the correction logic entirely. */
        if (correction)
                tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
                     TG3_EAV_REF_CLK_CORRECT_EN |
                     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
        else
                tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

        tg3_full_unlock(tp);

        return 0;
}
6230
/* ptp_clock_info::adjtime callback: shift the clock by delta ns.
 * Implemented purely in software via the ptp_adjust offset (applied on
 * every read/timestamp); the hardware counter is left untouched.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

        tg3_full_lock(tp, 0);
        tp->ptp_adjust += delta;
        tg3_full_unlock(tp);

        return 0;
}
6241
/* ptp_clock_info::gettimex64 callback: read the hardware clock (with
 * bracketing system timestamps stored in sts) and apply the software
 * offset before converting to a timespec.
 */
static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
                            struct ptp_system_timestamp *sts)
{
        u64 ns;
        struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

        tg3_full_lock(tp, 0);
        ns = tg3_refclk_read(tp, sts);
        ns += tp->ptp_adjust;
        tg3_full_unlock(tp);

        *ts = ns_to_timespec64(ns);

        return 0;
}
6257
/* ptp_clock_info::settime64 callback: load the hardware clock with the
 * requested time and clear the software offset, which is now folded
 * into the hardware counter.
 */
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
                           const struct timespec64 *ts)
{
        u64 ns;
        struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

        ns = timespec64_to_ns(ts);

        tg3_full_lock(tp, 0);
        tg3_refclk_write(tp, ns);
        tp->ptp_adjust = 0;
        tg3_full_unlock(tp);

        return 0;
}
6273
/* ptp_clock_info::enable callback.  Only a single one-shot periodic
 * output (driven by hardware watchdog 0) is supported.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
                          struct ptp_clock_request *rq, int on)
{
        struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
        u32 clock_ctl;
        int rval = 0;

        switch (rq->type) {
        case PTP_CLK_REQ_PEROUT:
                /* Only one timesync output (index 0) exists. */
                if (rq->perout.index != 0)
                        return -EINVAL;

                tg3_full_lock(tp, 0);
                clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
                clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

                if (on) {
                        u64 nsec;

                        nsec = rq->perout.start.sec * 1000000000ULL +
                               rq->perout.start.nsec;

                        /* One-shot only: the hardware cannot generate a
                         * repeating pulse, so any nonzero period is
                         * rejected.
                         */
                        if (rq->perout.period.sec || rq->perout.period.nsec) {
                                netdev_warn(tp->dev,
                                            "Device supports only a one-shot timesync output, period must be 0\n");
                                rval = -EINVAL;
                                goto err_out;
                        }

                        /* The watchdog start time is limited to 63 bits. */
                        if (nsec & (1ULL << 63)) {
                                netdev_warn(tp->dev,
                                            "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
                                rval = -EINVAL;
                                goto err_out;
                        }

                        /* Arm watchdog 0 with the start time, then route
                         * it to the timesync output.
                         */
                        tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
                        tw32(TG3_EAV_WATCHDOG0_MSB,
                             TG3_EAV_WATCHDOG0_EN |
                             ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

                        tw32(TG3_EAV_REF_CLCK_CTL,
                             clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
                } else {
                        /* Disarm the watchdog and restore the control
                         * register with the GPIO routing cleared.
                         */
                        tw32(TG3_EAV_WATCHDOG0_MSB, 0);
                        tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
                }

err_out:
                tg3_full_unlock(tp);
                return rval;

        default:
                break;
        }

        return -EOPNOTSUPP;
}
6332
/* Capabilities published to the PTP core.  n_per_out = 1 matches the
 * single one-shot timesync output handled in tg3_ptp_enable().
 */
static const struct ptp_clock_info tg3_ptp_caps = {
        .owner          = THIS_MODULE,
        .name           = "tg3 clock",
        .max_adj        = 250000000,    /* max frequency adjustment, in ppb */
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 1,
        .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = tg3_ptp_adjfreq,
        .adjtime        = tg3_ptp_adjtime,
        .gettimex64     = tg3_ptp_gettimex,
        .settime64      = tg3_ptp_settime,
        .enable         = tg3_ptp_enable,
};
6348
6349 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6350                                      struct skb_shared_hwtstamps *timestamp)
6351 {
6352         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6353         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6354                                            tp->ptp_adjust);
6355 }
6356
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
        if (!tg3_flag(tp, PTP_CAPABLE))
                return;

        /* Initialize the hardware clock to the system time. */
        tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
        /* Start with no software offset and the default capability table. */
        tp->ptp_adjust = 0;
        tp->ptp_info = tg3_ptp_caps;
}
6368
/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
        if (!tg3_flag(tp, PTP_CAPABLE))
                return;

        /* Reload the hardware clock from system time, folding in the
         * software offset accumulated before suspend, then zero the
         * offset.
         */
        tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
        tp->ptp_adjust = 0;
}
6378
/* Unregister the PTP clock and reset the driver's PTP state.  Safe to
 * call when no clock was ever registered.
 */
static void tg3_ptp_fini(struct tg3 *tp)
{
        if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
                return;

        ptp_clock_unregister(tp->ptp_clock);
        tp->ptp_clock = NULL;   /* guard against double unregister */
        tp->ptp_adjust = 0;
}
6388
/* Return the driver's irq_sync flag.  NOTE(review): nonzero presumably
 * means an interrupt quiesce is in progress — confirm against the
 * tg3_full_lock()/irq-sync logic elsewhere in this file.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
6393
6394 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6395 {
6396         int i;
6397
6398         dst = (u32 *)((u8 *)dst + off);
6399         for (i = 0; i < len; i += sizeof(u32))
6400                 *dst++ = tr32(off + i);
6401 }
6402
/* Snapshot the legacy (non-PCIe) register blocks into "regs".  The
 * buffer is indexed by register offset: each tg3_rd32_loop() call reads
 * the given number of bytes of registers starting at the given offset
 * into regs + offset.  Blocks that do not exist on a given chip are
 * guarded by feature flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
        tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
        tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
        tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
        tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
        tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
        tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
        tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
        tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
        tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
        tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
        tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
        tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
        tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
        tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
        tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

        /* Per-vector coalescing registers only exist with MSI-X. */
        if (tg3_flag(tp, SUPPORT_MSIX))
                tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

        tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
        tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
        tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

        /* 5705+ chips have no separate TX CPU. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
                tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
                tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
        }

        tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
        tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
        tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
        tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

        if (tg3_flag(tp, NVRAM))
                tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6452
6453 static void tg3_dump_state(struct tg3 *tp)
6454 {
6455         int i;
6456         u32 *regs;
6457
6458         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6459         if (!regs)
6460                 return;
6461
6462         if (tg3_flag(tp, PCI_EXPRESS)) {
6463                 /* Read up to but not including private PCI registers */
6464                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6465                         regs[i / sizeof(u32)] = tr32(i);
6466         } else
6467                 tg3_dump_legacy_regs(tp, regs);
6468
6469         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6470                 if (!regs[i + 0] && !regs[i + 1] &&
6471                     !regs[i + 2] && !regs[i + 3])
6472                         continue;
6473
6474                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6475                            i * 4,
6476                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6477         }
6478
6479         kfree(regs);
6480
6481         for (i = 0; i < tp->irq_cnt; i++) {
6482                 struct tg3_napi *tnapi = &tp->napi[i];
6483
6484                 /* SW status block */
6485                 netdev_err(tp->dev,
6486                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6487                            i,
6488                            tnapi->hw_status->status,
6489                            tnapi->hw_status->status_tag,
6490                            tnapi->hw_status->rx_jumbo_consumer,
6491                            tnapi->hw_status->rx_consumer,
6492                            tnapi->hw_status->rx_mini_consumer,
6493                            tnapi->hw_status->idx[0].rx_producer,
6494                            tnapi->hw_status->idx[0].tx_consumer);
6495
6496                 netdev_err(tp->dev,
6497                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6498                            i,
6499                            tnapi->last_tag, tnapi->last_irq_tag,
6500                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6501                            tnapi->rx_rcb_ptr,
6502                            tnapi->prodring.rx_std_prod_idx,
6503                            tnapi->prodring.rx_std_cons_idx,
6504                            tnapi->prodring.rx_jmb_prod_idx,
6505                            tnapi->prodring.rx_jmb_cons_idx);
6506         }
6507 }
6508
6509 /* This is called whenever we suspect that the system chipset is re-
6510  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6511  * is bogus tx completions. We try to recover by setting the
6512  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6513  * in the workqueue.
6514  */
static void tg3_tx_recover(struct tg3 *tp)
{
        /* We should only get here when the write-reorder workaround is
         * NOT already active: neither the flag nor the indirect mailbox
         * write path should be in use.
         */
        BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        netdev_warn(tp->dev,
                    "The system may be re-ordering memory-mapped I/O "
                    "cycles to the network device, attempting to recover. "
                    "Please report the problem to the driver maintainer "
                    "and include system chipset information.\n");

        /* The actual chip reset is deferred to the workqueue; here we
         * only mark recovery as pending (see comment above the function).
         */
        tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
6528
6529 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6530 {
6531         /* Tell compiler to fetch tx indices from memory. */
6532         barrier();
6533         return tnapi->tx_pending -
6534                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6535 }
6536
6537 /* Tigon3 never reports partial packet sends.  So we do not
6538  * need special logic to handle SKBs that have not had all
6539  * of their frags sent yet, like SunGEM does.
6540  */
/* Reclaim completed TX descriptors for one NAPI vector: unmap the DMA
 * buffers, hand back timestamps, free the skbs, and wake the queue if it
 * was stopped and enough space has been reclaimed.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        /* Index of the last descriptor the chip has consumed, as
         * reported in the host status block.
         */
        u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tnapi->tx_cons;
        struct netdev_queue *txq;
        int index = tnapi - tp->napi;
        unsigned int pkts_compl = 0, bytes_compl = 0;

        /* With TSS, vector 0 has no TX ring, so the netdev queue index
         * is the vector index minus one.
         */
        if (tg3_flag(tp, ENABLE_TSS))
                index--;

        txq = netdev_get_tx_queue(tp->dev, index);

        /* Walk from our software consumer up to the hardware consumer. */
        while (sw_idx != hw_idx) {
                struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                /* A NULL skb here means the rings are inconsistent --
                 * likely the MMIO-reordering problem; bail into recovery.
                 */
                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                /* Deliver the hardware TX timestamp, if one was requested
                 * for this descriptor.
                 */
                if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
                        struct skb_shared_hwtstamps timestamp;
                        u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
                        hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

                        tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

                        skb_tstamp_tx(skb, &timestamp);
                }

                /* Unmap the linear part of the skb. */
                pci_unmap_single(tp->pdev,
                                 dma_unmap_addr(ri, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);

                ri->skb = NULL;

                /* Skip over any extra descriptors used when the head was
                 * split across multiple BDs (ri->fragmented chains).
                 */
                while (ri->fragmented) {
                        ri->fragmented = false;
                        sw_idx = NEXT_TX(sw_idx);
                        ri = &tnapi->tx_buffers[sw_idx];
                }

                sw_idx = NEXT_TX(sw_idx);

                /* Unmap each page fragment of the skb. */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tnapi->tx_buffers[sw_idx];
                        /* Running past hw_idx or into an occupied slot
                         * mid-packet indicates ring corruption.
                         */
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;

                        pci_unmap_page(tp->pdev,
                                       dma_unmap_addr(ri, mapping),
                                       skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                       PCI_DMA_TODEVICE);

                        while (ri->fragmented) {
                                ri->fragmented = false;
                                sw_idx = NEXT_TX(sw_idx);
                                ri = &tnapi->tx_buffers[sw_idx];
                        }

                        sw_idx = NEXT_TX(sw_idx);
                }

                pkts_compl++;
                bytes_compl += skb->len;

                dev_consume_skb_any(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        /* Report completions for byte queue limits. */
        netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

        tnapi->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Wake the queue if it was stopped and we crossed the wakeup
         * threshold; re-check under the tx lock to close the race with
         * tg3_start_xmit() stopping the queue concurrently.
         */
        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}
6640
6641 static void tg3_frag_free(bool is_frag, void *data)
6642 {
6643         if (is_frag)
6644                 skb_free_frag(data);
6645         else
6646                 kfree(data);
6647 }
6648
6649 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6650 {
6651         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6652                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6653
6654         if (!ri->data)
6655                 return;
6656
6657         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6658                          map_sz, PCI_DMA_FROMDEVICE);
6659         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6660         ri->data = NULL;
6661 }
6662
6663
6664 /* Returns size of skb allocated or < 0 on error.
6665  *
6666  * We only need to fill in the address because the other members
6667  * of the RX descriptor are invariant, see tg3_init_rings.
6668  *
6669  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6670  * posting buffers we only dirty the first cache line of the RX
6671  * descriptor (containing the address).  Whereas for the RX status
6672  * buffers the cpu only reads the last cacheline of the RX descriptor
6673  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6674  */
6675 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6676                              u32 opaque_key, u32 dest_idx_unmasked,
6677                              unsigned int *frag_size)
6678 {
6679         struct tg3_rx_buffer_desc *desc;
6680         struct ring_info *map;
6681         u8 *data;
6682         dma_addr_t mapping;
6683         int skb_size, data_size, dest_idx;
6684
6685         switch (opaque_key) {
6686         case RXD_OPAQUE_RING_STD:
6687                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6688                 desc = &tpr->rx_std[dest_idx];
6689                 map = &tpr->rx_std_buffers[dest_idx];
6690                 data_size = tp->rx_pkt_map_sz;
6691                 break;
6692
6693         case RXD_OPAQUE_RING_JUMBO:
6694                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6695                 desc = &tpr->rx_jmb[dest_idx].std;
6696                 map = &tpr->rx_jmb_buffers[dest_idx];
6697                 data_size = TG3_RX_JMB_MAP_SZ;
6698                 break;
6699
6700         default:
6701                 return -EINVAL;
6702         }
6703
6704         /* Do not overwrite any of the map or rp information
6705          * until we are sure we can commit to a new buffer.
6706          *
6707          * Callers depend upon this behavior and assume that
6708          * we leave everything unchanged if we fail.
6709          */
6710         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6711                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6712         if (skb_size <= PAGE_SIZE) {
6713                 data = netdev_alloc_frag(skb_size);
6714                 *frag_size = skb_size;
6715         } else {
6716                 data = kmalloc(skb_size, GFP_ATOMIC);
6717                 *frag_size = 0;
6718         }
6719         if (!data)
6720                 return -ENOMEM;
6721
6722         mapping = pci_map_single(tp->pdev,
6723                                  data + TG3_RX_OFFSET(tp),
6724                                  data_size,
6725                                  PCI_DMA_FROMDEVICE);
6726         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6727                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6728                 return -EIO;
6729         }
6730
6731         map->data = data;
6732         dma_unmap_addr_set(map, mapping, mapping);
6733
6734         desc->addr_hi = ((u64)mapping >> 32);
6735         desc->addr_lo = ((u64)mapping & 0xffffffff);
6736
6737         return data_size;
6738 }
6739
6740 /* We only need to move over in the address because the other
6741  * members of the RX descriptor are invariant.  See notes above
6742  * tg3_alloc_rx_data for full details.
6743  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
                           struct tg3_rx_prodring_set *dpr,
                           u32 opaque_key, int src_idx,
                           u32 dest_idx_unmasked)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_rx_buffer_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        /* Source is always vector 0's producer ring set. */
        struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
        int dest_idx;

        /* Resolve source/destination descriptor and shadow entries for
         * the requested ring (standard or jumbo).
         */
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
                dest_desc = &dpr->rx_std[dest_idx];
                dest_map = &dpr->rx_std_buffers[dest_idx];
                src_desc = &spr->rx_std[src_idx];
                src_map = &spr->rx_std_buffers[src_idx];
                break;

        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
                dest_desc = &dpr->rx_jmb[dest_idx].std;
                dest_map = &dpr->rx_jmb_buffers[dest_idx];
                src_desc = &spr->rx_jmb[src_idx].std;
                src_map = &spr->rx_jmb_buffers[src_idx];
                break;

        default:
                return;
        }

        /* Move the buffer pointer, its DMA mapping, and the descriptor
         * address to the destination slot.
         */
        dest_map->data = src_map->data;
        dma_unmap_addr_set(dest_map, mapping,
                           dma_unmap_addr(src_map, mapping));
        dest_desc->addr_hi = src_desc->addr_hi;
        dest_desc->addr_lo = src_desc->addr_lo;

        /* Ensure that the update to the skb happens after the physical
         * addresses have been transferred to the new BD location.
         */
        smp_wmb();

        /* Clearing the source marks the slot empty for refill logic. */
        src_map->data = NULL;
}
6789
6790 /* The RX ring scheme is composed of multiple rings which post fresh
6791  * buffers to the chip, and one special ring the chip uses to report
6792  * status back to the host.
6793  *
6794  * The special ring reports the status of received packets to the
6795  * host.  The chip does not write into the original descriptor the
6796  * RX buffer was obtained from.  The chip simply takes the original
6797  * descriptor as provided by the host, updates the status and length
6798  * field, then writes this into the next status ring entry.
6799  *
6800  * Each ring the host uses to post buffers to the chip is described
6801  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6802  * it is first placed into the on-chip ram.  When the packet's length
6803  * is known, it walks down the TG3_BDINFO entries to select the ring.
6804  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6805  * which is within the range of the new packet's length is chosen.
6806  *
6807  * The "separate ring for rx status" scheme may sound queer, but it makes
6808  * sense from a cache coherency perspective.  If only the host writes
6809  * to the buffer post rings, and only the chip writes to the rx status
6810  * rings, then cache lines never move beyond shared-modified state.
6811  * If both the host and chip were to write into the same ring, cache line
6812  * eviction could occur since both entities want it in an exclusive state.
6813  */
/* NAPI RX handler for one vector: walk the status (return) ring from our
 * software index up to the hardware producer index, deliver packets, and
 * repost/refill the producer rings.  Returns the number of packets
 * received (bounded by @budget).
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
        struct tg3 *tp = tnapi->tp;
        u32 work_mask, rx_std_posted = 0;
        u32 std_prod_idx, jmb_prod_idx;
        u32 sw_idx = tnapi->rx_rcb_ptr;
        u16 hw_idx;
        int received;
        struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

        hw_idx = *(tnapi->rx_rcb_prod_idx);
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
         */
        rmb();
        work_mask = 0;
        received = 0;
        std_prod_idx = tpr->rx_std_prod_idx;
        jmb_prod_idx = tpr->rx_jmb_prod_idx;
        while (sw_idx != hw_idx && budget > 0) {
                struct ring_info *ri;
                struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;
                u8 *data;
                u64 tstamp = 0;

                /* The opaque cookie identifies which producer ring (and
                 * which slot in it) this packet's buffer came from.
                 */
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
                        dma_addr = dma_unmap_addr(ri, mapping);
                        data = ri->data;
                        post_ptr = &std_prod_idx;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
                        dma_addr = dma_unmap_addr(ri, mapping);
                        data = ri->data;
                        post_ptr = &jmb_prod_idx;
                } else
                        goto next_pkt_nopost;

                work_mask |= opaque_key;

                /* Chip reported a receive error: recycle the buffer
                 * back to the producer ring and count the drop.
                 */
                if (desc->err_vlan & RXD_ERR_MASK) {
                drop_it:
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
                        tp->rx_dropped++;
                        goto next_pkt;
                }

                prefetch(data + TG3_RX_OFFSET(tp));
                /* Hardware length includes the FCS; strip it. */
                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
                      ETH_FCS_LEN;

                /* Fetch the hardware RX timestamp for PTP frames. */
                if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
                     RXD_FLAG_PTPSTAT_PTPV1 ||
                    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
                     RXD_FLAG_PTPSTAT_PTPV2) {
                        tstamp = tr32(TG3_RX_TSTAMP_LSB);
                        tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
                }

                if (len > TG3_RX_COPY_THRESH(tp)) {
                        /* Large packet: hand the existing buffer to the
                         * stack and post a freshly allocated replacement.
                         */
                        int skb_size;
                        unsigned int frag_size;

                        skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
                                                    *post_ptr, &frag_size);
                        if (skb_size < 0)
                                goto drop_it;

                        pci_unmap_single(tp->pdev, dma_addr, skb_size,
                                         PCI_DMA_FROMDEVICE);

                        /* Ensure that the update to the data happens
                         * after the usage of the old DMA mapping.
                         */
                        smp_wmb();

                        ri->data = NULL;

                        skb = build_skb(data, frag_size);
                        if (!skb) {
                                tg3_frag_free(frag_size != 0, data);
                                goto drop_it_no_recycle;
                        }
                        skb_reserve(skb, TG3_RX_OFFSET(tp));
                } else {
                        /* Small packet: copy into a new skb and recycle
                         * the original buffer back to the producer ring.
                         */
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);

                        skb = netdev_alloc_skb(tp->dev,
                                               len + TG3_RAW_IP_ALIGN);
                        if (skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(skb, TG3_RAW_IP_ALIGN);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        memcpy(skb->data,
                               data + TG3_RX_OFFSET(tp),
                               len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                }

                skb_put(skb, len);
                if (tstamp)
                        tg3_hwclock_to_timestamp(tp, tstamp,
                                                 skb_hwtstamps(skb));

                /* Trust the hardware checksum only when the descriptor
                 * says TCP/UDP checksum was verified (0xffff).
                 */
                if ((tp->dev->features & NETIF_F_RXCSUM) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                skb->protocol = eth_type_trans(skb, tp->dev);

                /* Drop oversized frames unless they are VLAN tagged. */
                if (len > (tp->dev->mtu + ETH_HLEN) &&
                    skb->protocol != htons(ETH_P_8021Q) &&
                    skb->protocol != htons(ETH_P_8021AD)) {
                        dev_kfree_skb_any(skb);
                        goto drop_it_no_recycle;
                }

                if (desc->type_flags & RXD_FLAG_VLAN &&
                    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               desc->err_vlan & RXD_VLAN_MASK);

                napi_gro_receive(&tnapi->napi, skb);

                received++;
                budget--;

next_pkt:
                (*post_ptr)++;

                /* Periodically repost standard-ring buffers so the chip
                 * does not run dry before we finish the budget.
                 */
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        tpr->rx_std_prod_idx = std_prod_idx &
                                               tp->rx_std_ring_mask;
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     tpr->rx_std_prod_idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
next_pkt_nopost:
                sw_idx++;
                sw_idx &= tp->rx_ret_ring_mask;

                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
                        hw_idx = *(tnapi->rx_rcb_prod_idx);
                        rmb();
                }
        }

        /* ACK the status ring. */
        tnapi->rx_rcb_ptr = sw_idx;
        tw32_rx_mbox(tnapi->consmbox, sw_idx);

        /* Refill RX ring(s). */
        if (!tg3_flag(tp, ENABLE_RSS)) {
                /* Sync BD data before updating mailbox */
                wmb();

                if (work_mask & RXD_OPAQUE_RING_STD) {
                        tpr->rx_std_prod_idx = std_prod_idx &
                                               tp->rx_std_ring_mask;
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     tpr->rx_std_prod_idx);
                }
                if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                        tpr->rx_jmb_prod_idx = jmb_prod_idx &
                                               tp->rx_jmb_ring_mask;
                        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
                                     tpr->rx_jmb_prod_idx);
                }
        } else if (work_mask) {
                /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
                 * updated before the producer indices can be updated.
                 */
                smp_wmb();

                tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
                tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

                /* With RSS, vector 1 transfers per-vector buffers back to
                 * the shared ring; kick it to do the refill.
                 */
                if (tnapi != &tp->napi[1]) {
                        tp->rx_refill = true;
                        napi_schedule(&tp->napi[1].napi);
                }
        }

        return received;
}
7018
7019 static void tg3_poll_link(struct tg3 *tp)
7020 {
7021         /* handle link change and other phy events */
7022         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7023                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7024
7025                 if (sblk->status & SD_STATUS_LINK_CHG) {
7026                         sblk->status = SD_STATUS_UPDATED |
7027                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7028                         spin_lock(&tp->lock);
7029                         if (tg3_flag(tp, USE_PHYLIB)) {
7030                                 tw32_f(MAC_STATUS,
7031                                      (MAC_STATUS_SYNC_CHANGED |
7032                                       MAC_STATUS_CFG_CHANGED |
7033                                       MAC_STATUS_MI_COMPLETION |
7034                                       MAC_STATUS_LNKSTATE_CHANGED));
7035                                 udelay(40);
7036                         } else
7037                                 tg3_setup_phy(tp, false);
7038                         spin_unlock(&tp->lock);
7039                 }
7040         }
7041 }
7042
/* Transfer recycled RX buffers from a source producer-ring set @spr
 * (a per-vector ring) to the destination set @dpr (the shared ring the
 * chip consumes from), for both the standard and jumbo rings.  Returns
 * 0 on success or -ENOSPC when a destination slot was still occupied.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
                                struct tg3_rx_prodring_set *dpr,
                                struct tg3_rx_prodring_set *spr)
{
        u32 si, di, cpycnt, src_prod_idx;
        int i, err = 0;

        /* Standard ring transfer loop. */
        while (1) {
                src_prod_idx = spr->rx_std_prod_idx;

                /* Make sure updates to the rx_std_buffers[] entries and the
                 * standard producer index are seen in the correct order.
                 */
                smp_rmb();

                if (spr->rx_std_cons_idx == src_prod_idx)
                        break;

                /* Number of contiguous entries available, accounting for
                 * wrap-around of the source ring.
                 */
                if (spr->rx_std_cons_idx < src_prod_idx)
                        cpycnt = src_prod_idx - spr->rx_std_cons_idx;
                else
                        cpycnt = tp->rx_std_ring_mask + 1 -
                                 spr->rx_std_cons_idx;

                /* Also clamp to contiguous space in the destination. */
                cpycnt = min(cpycnt,
                             tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

                si = spr->rx_std_cons_idx;
                di = dpr->rx_std_prod_idx;

                /* Shrink the copy if a destination slot is still in use. */
                for (i = di; i < di + cpycnt; i++) {
                        if (dpr->rx_std_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
                        }
                }

                if (!cpycnt)
                        break;

                /* Ensure that updates to the rx_std_buffers ring and the
                 * shadowed hardware producer ring from tg3_recycle_skb() are
                 * ordered correctly WRT the skb check above.
                 */
                smp_rmb();

                memcpy(&dpr->rx_std_buffers[di],
                       &spr->rx_std_buffers[si],
                       cpycnt * sizeof(struct ring_info));

                for (i = 0; i < cpycnt; i++, di++, si++) {
                        struct tg3_rx_buffer_desc *sbd, *dbd;
                        sbd = &spr->rx_std[si];
                        dbd = &dpr->rx_std[di];
                        dbd->addr_hi = sbd->addr_hi;
                        dbd->addr_lo = sbd->addr_lo;
                }

                spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
                                       tp->rx_std_ring_mask;
                dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
                                       tp->rx_std_ring_mask;
        }

        /* Jumbo ring transfer loop -- same structure as above. */
        while (1) {
                src_prod_idx = spr->rx_jmb_prod_idx;

                /* Make sure updates to the rx_jmb_buffers[] entries and
                 * the jumbo producer index are seen in the correct order.
                 */
                smp_rmb();

                if (spr->rx_jmb_cons_idx == src_prod_idx)
                        break;

                if (spr->rx_jmb_cons_idx < src_prod_idx)
                        cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
                else
                        cpycnt = tp->rx_jmb_ring_mask + 1 -
                                 spr->rx_jmb_cons_idx;

                cpycnt = min(cpycnt,
                             tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

                si = spr->rx_jmb_cons_idx;
                di = dpr->rx_jmb_prod_idx;

                for (i = di; i < di + cpycnt; i++) {
                        if (dpr->rx_jmb_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
                        }
                }

                if (!cpycnt)
                        break;

                /* Ensure that updates to the rx_jmb_buffers ring and the
                 * shadowed hardware producer ring from tg3_recycle_skb() are
                 * ordered correctly WRT the skb check above.
                 */
                smp_rmb();

                memcpy(&dpr->rx_jmb_buffers[di],
                       &spr->rx_jmb_buffers[si],
                       cpycnt * sizeof(struct ring_info));

                for (i = 0; i < cpycnt; i++, di++, si++) {
                        struct tg3_rx_buffer_desc *sbd, *dbd;
                        sbd = &spr->rx_jmb[si].std;
                        dbd = &dpr->rx_jmb[di].std;
                        dbd->addr_hi = sbd->addr_hi;
                        dbd->addr_lo = sbd->addr_lo;
                }

                spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
                                       tp->rx_jmb_ring_mask;
                dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
                                       tp->rx_jmb_ring_mask;
        }

        return err;
}
7168
/* Core NAPI work function: reap TX completions first, then process RX
 * within the remaining NAPI budget.  Returns the updated work_done count.
 * For the RSS case, napi[1] additionally refills the shared producer
 * ring in napi[0] from every per-queue producer ring.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* A TX error was detected; stop here and let the reset
		 * task (scheduled by the caller) take over.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* No RX return ring on this vector: nothing more to do. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		/* Snapshot producer indices so we only touch the HW
		 * mailboxes below when something actually changed.
		 */
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Make the ring memory updates visible before telling
		 * the hardware about the new producer indices.
		 */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		/* A transfer could not complete (e.g. destination ring
		 * slot occupied, -ENOSPC): kick the coalescing engine so
		 * we get another interrupt and retry soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
7217
7218 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7219 {
7220         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7221                 schedule_work(&tp->reset_task);
7222 }
7223
/* Cancel a pending/running reset task and clear the recovery flags.
 * cancel_work_sync() runs first so any reset task already executing
 * has finished before the PENDING flags are cleared.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
7230
/* NAPI poll handler for the extra MSI-X vectors.  Loops doing TX/RX
 * work until the budget is exhausted or no work remains, then
 * completes NAPI and re-enables this vector's interrupt through its
 * mailbox using the tagged status scheme.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				/* Kick the coalescing engine so the refill
				 * is retried via another interrupt.
				 */
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7290
7291 static void tg3_process_error(struct tg3 *tp)
7292 {
7293         u32 val;
7294         bool real_error = false;
7295
7296         if (tg3_flag(tp, ERROR_PROCESSED))
7297                 return;
7298
7299         /* Check Flow Attention register */
7300         val = tr32(HOSTCC_FLOW_ATTN);
7301         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7302                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7303                 real_error = true;
7304         }
7305
7306         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7307                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7308                 real_error = true;
7309         }
7310
7311         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7312                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7313                 real_error = true;
7314         }
7315
7316         if (!real_error)
7317                 return;
7318
7319         tg3_dump_state(tp);
7320
7321         tg3_flag_set(tp, ERROR_PROCESSED);
7322         tg3_reset_task_schedule(tp);
7323 }
7324
/* NAPI poll handler for vector 0 (and the sole handler in INTx/MSI
 * mode).  Besides TX/RX work it also checks SD_STATUS_ERROR and polls
 * link state each pass, and supports both tagged and non-tagged
 * status-block interrupt acknowledgement.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			/* Non-tagged mode: acknowledge by clearing the
			 * UPDATED bit in the status block.
			 */
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7373
7374 static void tg3_napi_disable(struct tg3 *tp)
7375 {
7376         int i;
7377
7378         for (i = tp->irq_cnt - 1; i >= 0; i--)
7379                 napi_disable(&tp->napi[i].napi);
7380 }
7381
7382 static void tg3_napi_enable(struct tg3 *tp)
7383 {
7384         int i;
7385
7386         for (i = 0; i < tp->irq_cnt; i++)
7387                 napi_enable(&tp->napi[i].napi);
7388 }
7389
7390 static void tg3_napi_init(struct tg3 *tp)
7391 {
7392         int i;
7393
7394         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7395         for (i = 1; i < tp->irq_cnt; i++)
7396                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7397 }
7398
7399 static void tg3_napi_fini(struct tg3 *tp)
7400 {
7401         int i;
7402
7403         for (i = 0; i < tp->irq_cnt; i++)
7404                 netif_napi_del(&tp->napi[i].napi);
7405 }
7406
/* Halt all network activity: refresh the TX watchdog timestamp so it
 * does not fire while we are stopped, quiesce NAPI, drop carrier and
 * disable the TX queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7414
/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* NOTE(review): forcing SD_STATUS_UPDATED presumably makes the
	 * first pass through tg3_has_work()/the ISR treat the status
	 * block as freshly posted after the restart -- confirm.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7433
/* Wait until no interrupt handler is still running.  irq_sync is set
 * first (the IRQ handlers check it via tg3_irq_sync() and refuse to
 * schedule NAPI while it is set), then tp->lock is dropped so that
 * synchronize_irq() may sleep -- hence the __releases/__acquires
 * annotations.  The lock is re-acquired before returning.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Publish irq_sync before waiting on the handlers. */
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}
7452
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Pairs with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7464
/* Release tp->lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7469
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and next RX descriptor
	 * before the NAPI poll runs.
	 */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Don't schedule NAPI while tg3_irq_quiesce() is draining IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7487
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and next RX descriptor. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	/* Don't schedule NAPI while tg3_irq_quiesce() is draining IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7513
/* INTx interrupt handler for the non-tagged status scheme.  May be
 * invoked on a shared interrupt line, so it must detect and report
 * interrupts that are not ours (handled = 0).
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	/* Acknowledge the status block update before checking for work. */
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7562
/* INTx interrupt handler for the tagged status scheme.  Interrupts
 * are recognized as ours when the status tag has advanced beyond the
 * last tag we acknowledged.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	/* Warm the cache for the first RX descriptor NAPI will touch. */
	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7614
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	/* Claim the interrupt if either the status block was updated or
	 * the chip reports INTA still asserted (note: the PCI state read
	 * is skipped when the status-block check already succeeds), and
	 * disable further interrupts.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
7629
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: service every interrupt vector by calling the INTx
 * handler directly (used by netconsole and friends when normal
 * interrupt delivery cannot be relied upon).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Skip while tg3_irq_quiesce() is draining interrupt activity. */
	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
7643
7644 static void tg3_tx_timeout(struct net_device *dev)
7645 {
7646         struct tg3 *tp = netdev_priv(dev);
7647
7648         if (netif_msg_tx_err(tp)) {
7649                 netdev_err(dev, "transmit timed out, resetting\n");
7650                 tg3_dump_state(tp);
7651         }
7652
7653         tg3_reset_task_schedule(tp);
7654 }
7655
7656 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7657 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7658 {
7659         u32 base = (u32) mapping & 0xffffffff;
7660
7661         return base + len + 8 < base;
7662 }
7663
7664 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7665  * of any 4GB boundaries: 4G, 8G, etc
7666  */
7667 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7668                                            u32 len, u32 mss)
7669 {
7670         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7671                 u32 base = (u32) mapping & 0xffffffff;
7672
7673                 return ((base + len + (mss & 0x3fff)) < base);
7674         }
7675         return 0;
7676 }
7677
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only meaningful on 64-bit highmem configs; elsewhere the
	 * function compiles to a constant 0.
	 */
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
7690
7691 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7692                                  dma_addr_t mapping, u32 len, u32 flags,
7693                                  u32 mss, u32 vlan)
7694 {
7695         txbd->addr_hi = ((u64) mapping >> 32);
7696         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7697         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7698         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7699 }
7700
/* Write one buffer mapping into TX BDs starting at *entry, splitting
 * it into sub-fragments when tp->dma_limit is set.  *entry and
 * *budget are advanced/consumed through the pointers.  Returns true
 * if any hardware DMA bug condition was detected, in which case the
 * caller must take the workaround path.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Screen the mapping against every known hardware DMA bug. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Intermediate sub-fragments must not carry the END flag. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				/* Final sub-fragment keeps the original flags. */
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Ran out of budget mid-split: undo the
				 * fragmented mark on the last BD written.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7763
/* Unmap the DMA buffers of the skb stored at TX ring position
 * 'entry': the linear head plus 'last'+1 page fragments.  Extra BDs
 * created by tg3_tx_frag_set() sub-fragment splitting (marked
 * 'fragmented') are skipped over while walking the ring.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Unmap the linear portion of the skb. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip BDs that are sub-fragments of the head mapping. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip BDs that are sub-fragments of this frag mapping. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7801
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copy the skb into a freshly-allocated linear skb, remap it and
 * rebuild the TX BDs from the new mapping.  Returns 0 on success,
 * -1 on failure.  The original skb is consumed in all cases and
 * *pskb is updated to the replacement (NULL when allocation failed).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701: copy with extra headroom so skb->data can be
		 * placed at a 4-byte-aligned offset.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			/* Even the copied skb can still hit a DMA bug;
			 * in that case unwind the BDs and give up.
			 */
			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
7856
7857 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7858 {
7859         /* Check if we will never have enough descriptors,
7860          * as gso_segs can be more than current ring size
7861          */
7862         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7863 }
7864
7865 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7866
/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 *
 * The skb is software-segmented and each resulting segment is handed
 * back to tg3_start_xmit().  The original skb is consumed.  Returns
 * NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring lacks space for the
 * worst-case segment count.
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	/* Segment in software with TSO features masked off. */
	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	/* Transmit each segment individually. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
7909
7910 /* hard_start_xmit for all devices */
7911 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7912 {
7913         struct tg3 *tp = netdev_priv(dev);
7914         u32 len, entry, base_flags, mss, vlan = 0;
7915         u32 budget;
7916         int i = -1, would_hit_hwbug;
7917         dma_addr_t mapping;
7918         struct tg3_napi *tnapi;
7919         struct netdev_queue *txq;
7920         unsigned int last;
7921         struct iphdr *iph = NULL;
7922         struct tcphdr *tcph = NULL;
7923         __sum16 tcp_csum = 0, ip_csum = 0;
7924         __be16 ip_tot_len = 0;
7925
7926         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7927         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7928         if (tg3_flag(tp, ENABLE_TSS))
7929                 tnapi++;
7930
7931         budget = tg3_tx_avail(tnapi);
7932
7933         /* We are running in BH disabled context with netif_tx_lock
7934          * and TX reclaim runs via tp->napi.poll inside of a software
7935          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7936          * no IRQ context deadlocks to worry about either.  Rejoice!
7937          */
7938         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7939                 if (!netif_tx_queue_stopped(txq)) {
7940                         netif_tx_stop_queue(txq);
7941
7942                         /* This is a hard error, log it. */
7943                         netdev_err(dev,
7944                                    "BUG! Tx Ring full when queue awake!\n");
7945                 }
7946                 return NETDEV_TX_BUSY;
7947         }
7948
7949         entry = tnapi->tx_prod;
7950         base_flags = 0;
7951
7952         mss = skb_shinfo(skb)->gso_size;
7953         if (mss) {
7954                 u32 tcp_opt_len, hdr_len;
7955
7956                 if (skb_cow_head(skb, 0))
7957                         goto drop;
7958
7959                 iph = ip_hdr(skb);
7960                 tcp_opt_len = tcp_optlen(skb);
7961
7962                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7963
7964                 /* HW/FW can not correctly segment packets that have been
7965                  * vlan encapsulated.
7966                  */
7967                 if (skb->protocol == htons(ETH_P_8021Q) ||
7968                     skb->protocol == htons(ETH_P_8021AD)) {
7969                         if (tg3_tso_bug_gso_check(tnapi, skb))
7970                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7971                         goto drop;
7972                 }
7973
7974                 if (!skb_is_gso_v6(skb)) {
7975                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7976                             tg3_flag(tp, TSO_BUG)) {
7977                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7978                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7979                                 goto drop;
7980                         }
7981                         ip_csum = iph->check;
7982                         ip_tot_len = iph->tot_len;
7983                         iph->check = 0;
7984                         iph->tot_len = htons(mss + hdr_len);
7985                 }
7986
7987                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7988                                TXD_FLAG_CPU_POST_DMA);
7989
7990                 tcph = tcp_hdr(skb);
7991                 tcp_csum = tcph->check;
7992
7993                 if (tg3_flag(tp, HW_TSO_1) ||
7994                     tg3_flag(tp, HW_TSO_2) ||
7995                     tg3_flag(tp, HW_TSO_3)) {
7996                         tcph->check = 0;
7997                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7998                 } else {
7999                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8000                                                          0, IPPROTO_TCP, 0);
8001                 }
8002
8003                 if (tg3_flag(tp, HW_TSO_3)) {
8004                         mss |= (hdr_len & 0xc) << 12;
8005                         if (hdr_len & 0x10)
8006                                 base_flags |= 0x00000010;
8007                         base_flags |= (hdr_len & 0x3e0) << 5;
8008                 } else if (tg3_flag(tp, HW_TSO_2))
8009                         mss |= hdr_len << 9;
8010                 else if (tg3_flag(tp, HW_TSO_1) ||
8011                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8012                         if (tcp_opt_len || iph->ihl > 5) {
8013                                 int tsflags;
8014
8015                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8016                                 mss |= (tsflags << 11);
8017                         }
8018                 } else {
8019                         if (tcp_opt_len || iph->ihl > 5) {
8020                                 int tsflags;
8021
8022                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8023                                 base_flags |= tsflags << 12;
8024                         }
8025                 }
8026         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8027                 /* HW/FW can not correctly checksum packets that have been
8028                  * vlan encapsulated.
8029                  */
8030                 if (skb->protocol == htons(ETH_P_8021Q) ||
8031                     skb->protocol == htons(ETH_P_8021AD)) {
8032                         if (skb_checksum_help(skb))
8033                                 goto drop;
8034                 } else  {
8035                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8036                 }
8037         }
8038
8039         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8040             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8041                 base_flags |= TXD_FLAG_JMB_PKT;
8042
8043         if (skb_vlan_tag_present(skb)) {
8044                 base_flags |= TXD_FLAG_VLAN;
8045                 vlan = skb_vlan_tag_get(skb);
8046         }
8047
8048         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8049             tg3_flag(tp, TX_TSTAMP_EN)) {
8050                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8051                 base_flags |= TXD_FLAG_HWTSTAMP;
8052         }
8053
8054         len = skb_headlen(skb);
8055
8056         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8057         if (pci_dma_mapping_error(tp->pdev, mapping))
8058                 goto drop;
8059
8060
8061         tnapi->tx_buffers[entry].skb = skb;
8062         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8063
8064         would_hit_hwbug = 0;
8065
8066         if (tg3_flag(tp, 5701_DMA_BUG))
8067                 would_hit_hwbug = 1;
8068
8069         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8070                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8071                             mss, vlan)) {
8072                 would_hit_hwbug = 1;
8073         } else if (skb_shinfo(skb)->nr_frags > 0) {
8074                 u32 tmp_mss = mss;
8075
8076                 if (!tg3_flag(tp, HW_TSO_1) &&
8077                     !tg3_flag(tp, HW_TSO_2) &&
8078                     !tg3_flag(tp, HW_TSO_3))
8079                         tmp_mss = 0;
8080
8081                 /* Now loop through additional data
8082                  * fragments, and queue them.
8083                  */
8084                 last = skb_shinfo(skb)->nr_frags - 1;
8085                 for (i = 0; i <= last; i++) {
8086                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8087
8088                         len = skb_frag_size(frag);
8089                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8090                                                    len, DMA_TO_DEVICE);
8091
8092                         tnapi->tx_buffers[entry].skb = NULL;
8093                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8094                                            mapping);
8095                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8096                                 goto dma_error;
8097
8098                         if (!budget ||
8099                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8100                                             len, base_flags |
8101                                             ((i == last) ? TXD_FLAG_END : 0),
8102                                             tmp_mss, vlan)) {
8103                                 would_hit_hwbug = 1;
8104                                 break;
8105                         }
8106                 }
8107         }
8108
8109         if (would_hit_hwbug) {
8110                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8111
8112                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8113                         /* If it's a TSO packet, do GSO instead of
8114                          * allocating and copying to a large linear SKB
8115                          */
8116                         if (ip_tot_len) {
8117                                 iph->check = ip_csum;
8118                                 iph->tot_len = ip_tot_len;
8119                         }
8120                         tcph->check = tcp_csum;
8121                         return tg3_tso_bug(tp, tnapi, txq, skb);
8122                 }
8123
8124                 /* If the workaround fails due to memory/mapping
8125                  * failure, silently drop this packet.
8126                  */
8127                 entry = tnapi->tx_prod;
8128                 budget = tg3_tx_avail(tnapi);
8129                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8130                                                 base_flags, mss, vlan))
8131                         goto drop_nofree;
8132         }
8133
8134         skb_tx_timestamp(skb);
8135         netdev_tx_sent_queue(txq, skb->len);
8136
8137         /* Sync BD data before updating mailbox */
8138         wmb();
8139
8140         tnapi->tx_prod = entry;
8141         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8142                 netif_tx_stop_queue(txq);
8143
8144                 /* netif_tx_stop_queue() must be done before checking
8145                  * checking tx index in tg3_tx_avail() below, because in
8146                  * tg3_tx(), we update tx index before checking for
8147                  * netif_tx_queue_stopped().
8148                  */
8149                 smp_mb();
8150                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8151                         netif_tx_wake_queue(txq);
8152         }
8153
8154         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8155                 /* Packets are ready, update Tx producer idx on card. */
8156                 tw32_tx_mbox(tnapi->prodmbox, entry);
8157         }
8158
8159         return NETDEV_TX_OK;
8160
8161 dma_error:
8162         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8163         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8164 drop:
8165         dev_kfree_skb_any(skb);
8166 drop_nofree:
8167         tp->tx_dropped++;
8168         return NETDEV_TX_OK;
8169 }
8170
8171 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8172 {
8173         if (enable) {
8174                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8175                                   MAC_MODE_PORT_MODE_MASK);
8176
8177                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8178
8179                 if (!tg3_flag(tp, 5705_PLUS))
8180                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8181
8182                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8183                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8184                 else
8185                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8186         } else {
8187                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8188
8189                 if (tg3_flag(tp, 5705_PLUS) ||
8190                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8191                     tg3_asic_rev(tp) == ASIC_REV_5700)
8192                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8193         }
8194
8195         tw32(MAC_MODE, tp->mac_mode);
8196         udelay(40);
8197 }
8198
/* Put the PHY into loopback at the requested speed.
 *
 * @tp:      device state
 * @speed:   SPEED_10, SPEED_100 or SPEED_1000; any other value is treated
 *           as gigabit (downgraded to 100Mb on FET PHYs, see below)
 * @extlpbk: true for external loopback, false for internal BMCR loopback
 *
 * Returns 0 on success, or -EIO if external loopback setup fails.
 *
 * NOTE(review): the register write/read/udelay ordering below is a
 * hardware bring-up sequence — do not reorder.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Build the BMCR value: always full duplex, speed bits per request. */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs are run at 100Mb here instead of gigabit. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master mode for external loopback on
			 * non-FET PHYs.
			 */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the speed forced on the PHY. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* Per-PHY link polarity quirks on the 5700. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
8291
8292 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8293 {
8294         struct tg3 *tp = netdev_priv(dev);
8295
8296         if (features & NETIF_F_LOOPBACK) {
8297                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8298                         return;
8299
8300                 spin_lock_bh(&tp->lock);
8301                 tg3_mac_loopback(tp, true);
8302                 netif_carrier_on(tp->dev);
8303                 spin_unlock_bh(&tp->lock);
8304                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8305         } else {
8306                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8307                         return;
8308
8309                 spin_lock_bh(&tp->lock);
8310                 tg3_mac_loopback(tp, false);
8311                 /* Force link status check */
8312                 tg3_setup_phy(tp, true);
8313                 spin_unlock_bh(&tp->lock);
8314                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8315         }
8316 }
8317
8318 static netdev_features_t tg3_fix_features(struct net_device *dev,
8319         netdev_features_t features)
8320 {
8321         struct tg3 *tp = netdev_priv(dev);
8322
8323         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8324                 features &= ~NETIF_F_ALL_TSO;
8325
8326         return features;
8327 }
8328
8329 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8330 {
8331         netdev_features_t changed = dev->features ^ features;
8332
8333         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8334                 tg3_set_loopback(dev, features);
8335
8336         return 0;
8337 }
8338
8339 static void tg3_rx_prodring_free(struct tg3 *tp,
8340                                  struct tg3_rx_prodring_set *tpr)
8341 {
8342         int i;
8343
8344         if (tpr != &tp->napi[0].prodring) {
8345                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8346                      i = (i + 1) & tp->rx_std_ring_mask)
8347                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8348                                         tp->rx_pkt_map_sz);
8349
8350                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8351                         for (i = tpr->rx_jmb_cons_idx;
8352                              i != tpr->rx_jmb_prod_idx;
8353                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8354                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8355                                                 TG3_RX_JMB_MAP_SZ);
8356                         }
8357                 }
8358
8359                 return;
8360         }
8361
8362         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8363                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8364                                 tp->rx_pkt_map_sz);
8365
8366         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8367                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8368                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8369                                         TG3_RX_JMB_MAP_SZ);
8370         }
8371 }
8372
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, or -ENOMEM when not even one buffer could be
 * allocated for a ring (in which case everything is freed again).
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Per-vector sets only need their bookkeeping arrays cleared; the
	 * real descriptor rings are set up for napi[0]'s prodring below.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips with a jumbo MTU use the larger DMA size even
	 * on the standard ring.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			/* Zero buffers is fatal; otherwise run with a
			 * shrunken ring.
			 */
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup as above, for the jumbo ring. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
8481
8482 static void tg3_rx_prodring_fini(struct tg3 *tp,
8483                                  struct tg3_rx_prodring_set *tpr)
8484 {
8485         kfree(tpr->rx_std_buffers);
8486         tpr->rx_std_buffers = NULL;
8487         kfree(tpr->rx_jmb_buffers);
8488         tpr->rx_jmb_buffers = NULL;
8489         if (tpr->rx_std) {
8490                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8491                                   tpr->rx_std, tpr->rx_std_mapping);
8492                 tpr->rx_std = NULL;
8493         }
8494         if (tpr->rx_jmb) {
8495                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8496                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8497                 tpr->rx_jmb = NULL;
8498         }
8499 }
8500
8501 static int tg3_rx_prodring_init(struct tg3 *tp,
8502                                 struct tg3_rx_prodring_set *tpr)
8503 {
8504         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8505                                       GFP_KERNEL);
8506         if (!tpr->rx_std_buffers)
8507                 return -ENOMEM;
8508
8509         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8510                                          TG3_RX_STD_RING_BYTES(tp),
8511                                          &tpr->rx_std_mapping,
8512                                          GFP_KERNEL);
8513         if (!tpr->rx_std)
8514                 goto err_out;
8515
8516         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8517                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8518                                               GFP_KERNEL);
8519                 if (!tpr->rx_jmb_buffers)
8520                         goto err_out;
8521
8522                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8523                                                  TG3_RX_JMB_RING_BYTES(tp),
8524                                                  &tpr->rx_jmb_mapping,
8525                                                  GFP_KERNEL);
8526                 if (!tpr->rx_jmb)
8527                         goto err_out;
8528         }
8529
8530         return 0;
8531
8532 err_out:
8533         tg3_rx_prodring_fini(tp, tpr);
8534         return -ENOMEM;
8535 }
8536
8537 /* Free up pending packets in all rx/tx rings.
8538  *
8539  * The chip has been shut down and the driver detached from
8540  * the networking, so no interrupts or new tx packets will
8541  * end up in the driver.  tp->{tx,}lock is not held and we are not
8542  * in an interrupt context and thus may sleep.
8543  */
8544 static void tg3_free_rings(struct tg3 *tp)
8545 {
8546         int i, j;
8547
8548         for (j = 0; j < tp->irq_cnt; j++) {
8549                 struct tg3_napi *tnapi = &tp->napi[j];
8550
8551                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8552
8553                 if (!tnapi->tx_buffers)
8554                         continue;
8555
8556                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8557                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8558
8559                         if (!skb)
8560                                 continue;
8561
8562                         tg3_tx_skb_unmap(tnapi, i,
8563                                          skb_shinfo(skb)->nr_frags - 1);
8564
8565                         dev_consume_skb_any(skb);
8566                 }
8567                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8568         }
8569 }
8570
8571 /* Initialize tx/rx rings for packet processing.
8572  *
8573  * The chip has been shut down and the driver detached from
8574  * the networking, so no interrupts or new tx packets will
8575  * end up in the driver.  tp->{tx,}lock are held and thus
8576  * we may not sleep.
8577  */
8578 static int tg3_init_rings(struct tg3 *tp)
8579 {
8580         int i;
8581
8582         /* Free up all the SKBs. */
8583         tg3_free_rings(tp);
8584
8585         for (i = 0; i < tp->irq_cnt; i++) {
8586                 struct tg3_napi *tnapi = &tp->napi[i];
8587
8588                 tnapi->last_tag = 0;
8589                 tnapi->last_irq_tag = 0;
8590                 tnapi->hw_status->status = 0;
8591                 tnapi->hw_status->status_tag = 0;
8592                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8593
8594                 tnapi->tx_prod = 0;
8595                 tnapi->tx_cons = 0;
8596                 if (tnapi->tx_ring)
8597                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8598
8599                 tnapi->rx_rcb_ptr = 0;
8600                 if (tnapi->rx_rcb)
8601                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8602
8603                 if (tnapi->prodring.rx_std &&
8604                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8605                         tg3_free_rings(tp);
8606                         return -ENOMEM;
8607                 }
8608         }
8609
8610         return 0;
8611 }
8612
8613 static void tg3_mem_tx_release(struct tg3 *tp)
8614 {
8615         int i;
8616
8617         for (i = 0; i < tp->irq_max; i++) {
8618                 struct tg3_napi *tnapi = &tp->napi[i];
8619
8620                 if (tnapi->tx_ring) {
8621                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8622                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8623                         tnapi->tx_ring = NULL;
8624                 }
8625
8626                 kfree(tnapi->tx_buffers);
8627                 tnapi->tx_buffers = NULL;
8628         }
8629 }
8630
8631 static int tg3_mem_tx_acquire(struct tg3 *tp)
8632 {
8633         int i;
8634         struct tg3_napi *tnapi = &tp->napi[0];
8635
8636         /* If multivector TSS is enabled, vector 0 does not handle
8637          * tx interrupts.  Don't allocate any resources for it.
8638          */
8639         if (tg3_flag(tp, ENABLE_TSS))
8640                 tnapi++;
8641
8642         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8643                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8644                                             sizeof(struct tg3_tx_ring_info),
8645                                             GFP_KERNEL);
8646                 if (!tnapi->tx_buffers)
8647                         goto err_out;
8648
8649                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8650                                                     TG3_TX_RING_BYTES,
8651                                                     &tnapi->tx_desc_mapping,
8652                                                     GFP_KERNEL);
8653                 if (!tnapi->tx_ring)
8654                         goto err_out;
8655         }
8656
8657         return 0;
8658
8659 err_out:
8660         tg3_mem_tx_release(tp);
8661         return -ENOMEM;
8662 }
8663
8664 static void tg3_mem_rx_release(struct tg3 *tp)
8665 {
8666         int i;
8667
8668         for (i = 0; i < tp->irq_max; i++) {
8669                 struct tg3_napi *tnapi = &tp->napi[i];
8670
8671                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8672
8673                 if (!tnapi->rx_rcb)
8674                         continue;
8675
8676                 dma_free_coherent(&tp->pdev->dev,
8677                                   TG3_RX_RCB_RING_BYTES(tp),
8678                                   tnapi->rx_rcb,
8679                                   tnapi->rx_rcb_mapping);
8680                 tnapi->rx_rcb = NULL;
8681         }
8682 }
8683
8684 static int tg3_mem_rx_acquire(struct tg3 *tp)
8685 {
8686         unsigned int i, limit;
8687
8688         limit = tp->rxq_cnt;
8689
8690         /* If RSS is enabled, we need a (dummy) producer ring
8691          * set on vector zero.  This is the true hw prodring.
8692          */
8693         if (tg3_flag(tp, ENABLE_RSS))
8694                 limit++;
8695
8696         for (i = 0; i < limit; i++) {
8697                 struct tg3_napi *tnapi = &tp->napi[i];
8698
8699                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8700                         goto err_out;
8701
8702                 /* If multivector RSS is enabled, vector 0
8703                  * does not handle rx or tx interrupts.
8704                  * Don't allocate any resources for it.
8705                  */
8706                 if (!i && tg3_flag(tp, ENABLE_RSS))
8707                         continue;
8708
8709                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8710                                                    TG3_RX_RCB_RING_BYTES(tp),
8711                                                    &tnapi->rx_rcb_mapping,
8712                                                    GFP_KERNEL);
8713                 if (!tnapi->rx_rcb)
8714                         goto err_out;
8715         }
8716
8717         return 0;
8718
8719 err_out:
8720         tg3_mem_rx_release(tp);
8721         return -ENOMEM;
8722 }
8723
8724 /*
8725  * Must not be invoked with interrupt sources disabled and
8726  * the hardware shutdown down.
8727  */
8728 static void tg3_free_consistent(struct tg3 *tp)
8729 {
8730         int i;
8731
8732         for (i = 0; i < tp->irq_cnt; i++) {
8733                 struct tg3_napi *tnapi = &tp->napi[i];
8734
8735                 if (tnapi->hw_status) {
8736                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8737                                           tnapi->hw_status,
8738                                           tnapi->status_mapping);
8739                         tnapi->hw_status = NULL;
8740                 }
8741         }
8742
8743         tg3_mem_rx_release(tp);
8744         tg3_mem_tx_release(tp);
8745
8746         /* tp->hw_stats can be referenced safely:
8747          *     1. under rtnl_lock
8748          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8749          */
8750         if (tp->hw_stats) {
8751                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8752                                   tp->hw_stats, tp->stats_mapping);
8753                 tp->hw_stats = NULL;
8754         }
8755 }
8756
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the hardware statistics block, a status block per vector,
 * and all tx/rx ring memory, and wires up each vector's rx return ring
 * producer index pointer.  Returns 0 on success or -ENOMEM, in which
 * case everything already allocated has been released again.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			/* NOTE(review): vector 0 handles no rx under RSS, so
			 * its rx_rcb_prod_idx intentionally stays NULL here.
			 */
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
8822
8823 #define MAX_WAIT_CNT 1000
8824
8825 /* To stop a block, clear the enable bit and poll till it
8826  * clears.  tp->lock is held.
8827  */
8828 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8829 {
8830         unsigned int i;
8831         u32 val;
8832
8833         if (tg3_flag(tp, 5705_PLUS)) {
8834                 switch (ofs) {
8835                 case RCVLSC_MODE:
8836                 case DMAC_MODE:
8837                 case MBFREE_MODE:
8838                 case BUFMGR_MODE:
8839                 case MEMARB_MODE:
8840                         /* We can't enable/disable these bits of the
8841                          * 5705/5750, just say success.
8842                          */
8843                         return 0;
8844
8845                 default:
8846                         break;
8847                 }
8848         }
8849
8850         val = tr32(ofs);
8851         val &= ~enable_bit;
8852         tw32_f(ofs, val);
8853
8854         for (i = 0; i < MAX_WAIT_CNT; i++) {
8855                 if (pci_channel_offline(tp->pdev)) {
8856                         dev_err(&tp->pdev->dev,
8857                                 "tg3_stop_block device offline, "
8858                                 "ofs=%lx enable_bit=%x\n",
8859                                 ofs, enable_bit);
8860                         return -ENODEV;
8861                 }
8862
8863                 udelay(100);
8864                 val = tr32(ofs);
8865                 if ((val & enable_bit) == 0)
8866                         break;
8867         }
8868
8869         if (i == MAX_WAIT_CNT && !silent) {
8870                 dev_err(&tp->pdev->dev,
8871                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8872                         ofs, enable_bit);
8873                 return -ENODEV;
8874         }
8875
8876         return 0;
8877 }
8878
8879 /* tp->lock is held. */
8880 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8881 {
8882         int i, err;
8883
8884         tg3_disable_ints(tp);
8885
8886         if (pci_channel_offline(tp->pdev)) {
8887                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8888                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8889                 err = -ENODEV;
8890                 goto err_no_dev;
8891         }
8892
8893         tp->rx_mode &= ~RX_MODE_ENABLE;
8894         tw32_f(MAC_RX_MODE, tp->rx_mode);
8895         udelay(10);
8896
8897         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8898         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8899         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8900         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8901         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8902         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8903
8904         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8905         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8906         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8907         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8908         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8909         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8910         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8911
8912         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8913         tw32_f(MAC_MODE, tp->mac_mode);
8914         udelay(40);
8915
8916         tp->tx_mode &= ~TX_MODE_ENABLE;
8917         tw32_f(MAC_TX_MODE, tp->tx_mode);
8918
8919         for (i = 0; i < MAX_WAIT_CNT; i++) {
8920                 udelay(100);
8921                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8922                         break;
8923         }
8924         if (i >= MAX_WAIT_CNT) {
8925                 dev_err(&tp->pdev->dev,
8926                         "%s timed out, TX_MODE_ENABLE will not clear "
8927                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8928                 err |= -ENODEV;
8929         }
8930
8931         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8932         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8933         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8934
8935         tw32(FTQ_RESET, 0xffffffff);
8936         tw32(FTQ_RESET, 0x00000000);
8937
8938         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8939         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8940
8941 err_no_dev:
8942         for (i = 0; i < tp->irq_cnt; i++) {
8943                 struct tg3_napi *tnapi = &tp->napi[i];
8944                 if (tnapi->hw_status)
8945                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8946         }
8947
8948         return err;
8949 }
8950
8951 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* Only PCI_COMMAND is captured; tg3_restore_pci_state() writes
	 * it back after the chip reset, which can clear bits in it.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8956
8957 /* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Cache line size / latency timer are only restored when not
	 * running in PCI Express mode.
	 */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Re-enable MSI generation in the MAC as well. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
9017
9018 static void tg3_override_clk(struct tg3 *tp)
9019 {
9020         u32 val;
9021
9022         switch (tg3_asic_rev(tp)) {
9023         case ASIC_REV_5717:
9024                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9025                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9026                      TG3_CPMU_MAC_ORIDE_ENABLE);
9027                 break;
9028
9029         case ASIC_REV_5719:
9030         case ASIC_REV_5720:
9031                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9032                 break;
9033
9034         default:
9035                 return;
9036         }
9037 }
9038
9039 static void tg3_restore_clk(struct tg3 *tp)
9040 {
9041         u32 val;
9042
9043         switch (tg3_asic_rev(tp)) {
9044         case ASIC_REV_5717:
9045                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9046                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9047                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9048                 break;
9049
9050         case ASIC_REV_5719:
9051         case ASIC_REV_5720:
9052                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9053                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9054                 break;
9055
9056         default:
9057                 return;
9058         }
9059 }
9060
9061 /* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Drop the lock while waiting for in-flight irq handlers to
	 * finish; they must observe CHIP_RESETTING before we proceed.
	 */
	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	/* This write triggers the core clock reset. */
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	/* Config space should respond again; put back what the reset
	 * clobbered (see tg3_save_pci_state()).
	 */
	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Wait for the bootcode to signal it has finished (tg3_poll_fw). */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-select the MAC port mode according to the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
9338
9339 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9340 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9341 static void __tg3_set_rx_mode(struct net_device *);
9342
9343 /* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	/* Stop firmware activity before touching the hardware. */
	tg3_stop_fw(tp);

	/* 'kind' selects the reset signature written for the bootcode. */
	tg3_write_sig_pre_reset(tp, kind);

	/* Quiesce DMA/MAC blocks (return value deliberately ignored —
	 * best effort), then hard-reset the chip.
	 */
	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Re-program the MAC address after the reset. */
	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
9371
9372 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9373 {
9374         struct tg3 *tp = netdev_priv(dev);
9375         struct sockaddr *addr = p;
9376         int err = 0;
9377         bool skip_mac_1 = false;
9378
9379         if (!is_valid_ether_addr(addr->sa_data))
9380                 return -EADDRNOTAVAIL;
9381
9382         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9383
9384         if (!netif_running(dev))
9385                 return 0;
9386
9387         if (tg3_flag(tp, ENABLE_ASF)) {
9388                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9389
9390                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9391                 addr0_low = tr32(MAC_ADDR_0_LOW);
9392                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9393                 addr1_low = tr32(MAC_ADDR_1_LOW);
9394
9395                 /* Skip MAC addr 1 if ASF is using it. */
9396                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9397                     !(addr1_high == 0 && addr1_low == 0))
9398                         skip_mac_1 = true;
9399         }
9400         spin_lock_bh(&tp->lock);
9401         __tg3_set_mac_addr(tp, skip_mac_1);
9402         __tg3_set_rx_mode(dev);
9403         spin_unlock_bh(&tp->lock);
9404
9405         return err;
9406 }
9407
9408 /* tp->lock is held. */
9409 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9410                            dma_addr_t mapping, u32 maxlen_flags,
9411                            u32 nic_addr)
9412 {
9413         tg3_write_mem(tp,
9414                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9415                       ((u64) mapping >> 32));
9416         tg3_write_mem(tp,
9417                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9418                       ((u64) mapping & 0xffffffff));
9419         tg3_write_mem(tp,
9420                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9421                        maxlen_flags);
9422
9423         if (!tg3_flag(tp, 5705_PLUS))
9424                 tg3_write_mem(tp,
9425                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9426                               nic_addr);
9427 }
9428
9429
9430 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9431 {
9432         int i = 0;
9433
9434         if (!tg3_flag(tp, ENABLE_TSS)) {
9435                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9436                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9437                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9438         } else {
9439                 tw32(HOSTCC_TXCOL_TICKS, 0);
9440                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9441                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9442
9443                 for (; i < tp->txq_cnt; i++) {
9444                         u32 reg;
9445
9446                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9447                         tw32(reg, ec->tx_coalesce_usecs);
9448                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9449                         tw32(reg, ec->tx_max_coalesced_frames);
9450                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9451                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9452                 }
9453         }
9454
9455         for (; i < tp->irq_max - 1; i++) {
9456                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9457                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9458                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9459         }
9460 }
9461
9462 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9463 {
9464         int i = 0;
9465         u32 limit = tp->rxq_cnt;
9466
9467         if (!tg3_flag(tp, ENABLE_RSS)) {
9468                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9469                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9470                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9471                 limit--;
9472         } else {
9473                 tw32(HOSTCC_RXCOL_TICKS, 0);
9474                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9475                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9476         }
9477
9478         for (; i < limit; i++) {
9479                 u32 reg;
9480
9481                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9482                 tw32(reg, ec->rx_coalesce_usecs);
9483                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9484                 tw32(reg, ec->rx_max_coalesced_frames);
9485                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9486                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9487         }
9488
9489         for (; i < tp->irq_max - 1; i++) {
9490                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9491                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9492                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9493         }
9494 }
9495
9496 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9497 {
9498         tg3_coal_tx_init(tp, ec);
9499         tg3_coal_rx_init(tp, ec);
9500
9501         if (!tg3_flag(tp, 5705_PLUS)) {
9502                 u32 val = ec->stats_block_coalesce_usecs;
9503
9504                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9505                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9506
9507                 if (!tp->link_up)
9508                         val = 0;
9509
9510                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9511         }
9512 }
9513
9514 /* tp->lock is held. */
9515 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9516 {
9517         u32 txrcb, limit;
9518
9519         /* Disable all transmit rings but the first. */
9520         if (!tg3_flag(tp, 5705_PLUS))
9521                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9522         else if (tg3_flag(tp, 5717_PLUS))
9523                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9524         else if (tg3_flag(tp, 57765_CLASS) ||
9525                  tg3_asic_rev(tp) == ASIC_REV_5762)
9526                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9527         else
9528                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9529
9530         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9531              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9532                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9533                               BDINFO_FLAGS_DISABLED);
9534 }
9535
9536 /* tp->lock is held. */
9537 static void tg3_tx_rcbs_init(struct tg3 *tp)
9538 {
9539         int i = 0;
9540         u32 txrcb = NIC_SRAM_SEND_RCB;
9541
9542         if (tg3_flag(tp, ENABLE_TSS))
9543                 i++;
9544
9545         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9546                 struct tg3_napi *tnapi = &tp->napi[i];
9547
9548                 if (!tnapi->tx_ring)
9549                         continue;
9550
9551                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9552                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9553                                NIC_SRAM_TX_BUFFER_DESC);
9554         }
9555 }
9556
9557 /* tp->lock is held. */
9558 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9559 {
9560         u32 rxrcb, limit;
9561
9562         /* Disable all receive return rings but the first. */
9563         if (tg3_flag(tp, 5717_PLUS))
9564                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9565         else if (!tg3_flag(tp, 5705_PLUS))
9566                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9567         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9568                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9569                  tg3_flag(tp, 57765_CLASS))
9570                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9571         else
9572                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9573
9574         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9575              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9576                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9577                               BDINFO_FLAGS_DISABLED);
9578 }
9579
9580 /* tp->lock is held. */
9581 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9582 {
9583         int i = 0;
9584         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9585
9586         if (tg3_flag(tp, ENABLE_RSS))
9587                 i++;
9588
9589         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9590                 struct tg3_napi *tnapi = &tp->napi[i];
9591
9592                 if (!tnapi->rx_rcb)
9593                         continue;
9594
9595                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9596                                (tp->rx_ret_ring_mask + 1) <<
9597                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9598         }
9599 }
9600
/* tp->lock is held.
 *
 * Quiesce and re-seed ring state: disable the TX and RX-return ring
 * control blocks, zero the interrupt/producer/consumer mailboxes,
 * clear the host status blocks and reprogram their DMA addresses,
 * then re-initialize the TX and RX-return RCBs.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Take the send and receive-return RCBs offline before
	 * touching mailbox and status-block state.
	 */
	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	/* Reset per-vector MSI-watchdog bookkeeping for vector 0. */
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			/* Per-vector TX producer mailboxes exist only
			 * when TSS is enabled.
			 */
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			/* Writing 1 leaves this vector's interrupt
			 * disabled, mirroring vector 0 above.
			 */
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the only TX producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Program the status block DMA addresses of the remaining
	 * vectors; their register pairs start at HOSTCC_STATBLCK_RING1
	 * and are spaced 8 bytes apart.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
9671
9672 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9673 {
9674         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9675
9676         if (!tg3_flag(tp, 5750_PLUS) ||
9677             tg3_flag(tp, 5780_CLASS) ||
9678             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9679             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9680             tg3_flag(tp, 57765_PLUS))
9681                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9682         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9683                  tg3_asic_rev(tp) == ASIC_REV_5787)
9684                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9685         else
9686                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9687
9688         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9689         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9690
9691         val = min(nic_rep_thresh, host_rep_thresh);
9692         tw32(RCVBDI_STD_THRESH, val);
9693
9694         if (tg3_flag(tp, 57765_PLUS))
9695                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9696
9697         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9698                 return;
9699
9700         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9701
9702         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9703
9704         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9705         tw32(RCVBDI_JUMBO_THRESH, val);
9706
9707         if (tg3_flag(tp, 57765_PLUS))
9708                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9709 }
9710
9711 static inline u32 calc_crc(unsigned char *buf, int len)
9712 {
9713         u32 reg;
9714         u32 tmp;
9715         int j, k;
9716
9717         reg = 0xffffffff;
9718
9719         for (j = 0; j < len; j++) {
9720                 reg ^= buf[j];
9721
9722                 for (k = 0; k < 8; k++) {
9723                         tmp = reg & 0x01;
9724
9725                         reg >>= 1;
9726
9727                         if (tmp)
9728                                 reg ^= CRC32_POLY_LE;
9729                 }
9730         }
9731
9732         return ~reg;
9733 }
9734
9735 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9736 {
9737         /* accept or reject all multicast frames */
9738         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9739         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9740         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9741         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9742 }
9743
/* Program RX filtering for @dev: promiscuous/allmulti flags, the
 * 128-bit multicast hash filter, the unicast MAC filter slots, and
 * finally the MAC_RX_MODE register if the mode changed.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	/* Start from the current mode with the bits recomputed below
	 * cleared.
	 */
	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Map each address to one of 128 hash bits: the low 7
		 * bits of the inverted CRC select the bit; bits 6:5
		 * pick the register, bits 4:0 the position within it.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Fall back to promiscuous mode when there are more unicast
	 * addresses than hardware filter slots.
	 */
	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the MAC address filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	/* Only touch the hardware when the mode actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9811
9812 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9813 {
9814         int i;
9815
9816         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9817                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9818 }
9819
9820 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9821 {
9822         int i;
9823
9824         if (!tg3_flag(tp, SUPPORT_MSIX))
9825                 return;
9826
9827         if (tp->rxq_cnt == 1) {
9828                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9829                 return;
9830         }
9831
9832         /* Validate table against current IRQ count */
9833         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9834                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9835                         break;
9836         }
9837
9838         if (i != TG3_RSS_INDIR_TBL_SIZE)
9839                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9840 }
9841
9842 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9843 {
9844         int i = 0;
9845         u32 reg = MAC_RSS_INDIR_TBL_0;
9846
9847         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9848                 u32 val = tp->rss_ind_tbl[i];
9849                 i++;
9850                 for (; i % 8; i++) {
9851                         val <<= 4;
9852                         val |= tp->rss_ind_tbl[i];
9853                 }
9854                 tw32(reg, val);
9855                 reg += 4;
9856         }
9857 }
9858
9859 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9860 {
9861         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9862                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9863         else
9864                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9865 }
9866
9867 /* tp->lock is held. */
9868 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9869 {
9870         u32 val, rdmac_mode;
9871         int i, err, limit;
9872         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9873
9874         tg3_disable_ints(tp);
9875
9876         tg3_stop_fw(tp);
9877
9878         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9879
9880         if (tg3_flag(tp, INIT_COMPLETE))
9881                 tg3_abort_hw(tp, 1);
9882
9883         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9884             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9885                 tg3_phy_pull_config(tp);
9886                 tg3_eee_pull_config(tp, NULL);
9887                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9888         }
9889
9890         /* Enable MAC control of LPI */
9891         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9892                 tg3_setup_eee(tp);
9893
9894         if (reset_phy)
9895                 tg3_phy_reset(tp);
9896
9897         err = tg3_chip_reset(tp);
9898         if (err)
9899                 return err;
9900
9901         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9902
9903         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9904                 val = tr32(TG3_CPMU_CTRL);
9905                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9906                 tw32(TG3_CPMU_CTRL, val);
9907
9908                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9909                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9910                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9911                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9912
9913                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9914                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9915                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9916                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9917
9918                 val = tr32(TG3_CPMU_HST_ACC);
9919                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9920                 val |= CPMU_HST_ACC_MACCLK_6_25;
9921                 tw32(TG3_CPMU_HST_ACC, val);
9922         }
9923
9924         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9925                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9926                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9927                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9928                 tw32(PCIE_PWR_MGMT_THRESH, val);
9929
9930                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9931                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9932
9933                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9934
9935                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9936                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9937         }
9938
9939         if (tg3_flag(tp, L1PLLPD_EN)) {
9940                 u32 grc_mode = tr32(GRC_MODE);
9941
9942                 /* Access the lower 1K of PL PCIE block registers. */
9943                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9944                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9945
9946                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9947                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9948                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9949
9950                 tw32(GRC_MODE, grc_mode);
9951         }
9952
9953         if (tg3_flag(tp, 57765_CLASS)) {
9954                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9955                         u32 grc_mode = tr32(GRC_MODE);
9956
9957                         /* Access the lower 1K of PL PCIE block registers. */
9958                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9959                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9960
9961                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9962                                    TG3_PCIE_PL_LO_PHYCTL5);
9963                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9964                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9965
9966                         tw32(GRC_MODE, grc_mode);
9967                 }
9968
9969                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9970                         u32 grc_mode;
9971
9972                         /* Fix transmit hangs */
9973                         val = tr32(TG3_CPMU_PADRNG_CTL);
9974                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9975                         tw32(TG3_CPMU_PADRNG_CTL, val);
9976
9977                         grc_mode = tr32(GRC_MODE);
9978
9979                         /* Access the lower 1K of DL PCIE block registers. */
9980                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9981                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9982
9983                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9984                                    TG3_PCIE_DL_LO_FTSMAX);
9985                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9986                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9987                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9988
9989                         tw32(GRC_MODE, grc_mode);
9990                 }
9991
9992                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9993                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9994                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9995                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9996         }
9997
9998         /* This works around an issue with Athlon chipsets on
9999          * B3 tigon3 silicon.  This bit has no effect on any
10000          * other revision.  But do not set this on PCI Express
10001          * chips and don't even touch the clocks if the CPMU is present.
10002          */
10003         if (!tg3_flag(tp, CPMU_PRESENT)) {
10004                 if (!tg3_flag(tp, PCI_EXPRESS))
10005                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10006                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10007         }
10008
10009         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10010             tg3_flag(tp, PCIX_MODE)) {
10011                 val = tr32(TG3PCI_PCISTATE);
10012                 val |= PCISTATE_RETRY_SAME_DMA;
10013                 tw32(TG3PCI_PCISTATE, val);
10014         }
10015
10016         if (tg3_flag(tp, ENABLE_APE)) {
10017                 /* Allow reads and writes to the
10018                  * APE register and memory space.
10019                  */
10020                 val = tr32(TG3PCI_PCISTATE);
10021                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10022                        PCISTATE_ALLOW_APE_SHMEM_WR |
10023                        PCISTATE_ALLOW_APE_PSPACE_WR;
10024                 tw32(TG3PCI_PCISTATE, val);
10025         }
10026
10027         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10028                 /* Enable some hw fixes.  */
10029                 val = tr32(TG3PCI_MSI_DATA);
10030                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10031                 tw32(TG3PCI_MSI_DATA, val);
10032         }
10033
10034         /* Descriptor ring init may make accesses to the
10035          * NIC SRAM area to setup the TX descriptors, so we
10036          * can only do this after the hardware has been
10037          * successfully reset.
10038          */
10039         err = tg3_init_rings(tp);
10040         if (err)
10041                 return err;
10042
10043         if (tg3_flag(tp, 57765_PLUS)) {
10044                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10045                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10046                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10047                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10048                 if (!tg3_flag(tp, 57765_CLASS) &&
10049                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10050                     tg3_asic_rev(tp) != ASIC_REV_5762)
10051                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10052                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10053         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10054                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10055                 /* This value is determined during the probe time DMA
10056                  * engine test, tg3_test_dma.
10057                  */
10058                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10059         }
10060
10061         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10062                           GRC_MODE_4X_NIC_SEND_RINGS |
10063                           GRC_MODE_NO_TX_PHDR_CSUM |
10064                           GRC_MODE_NO_RX_PHDR_CSUM);
10065         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10066
10067         /* Pseudo-header checksum is done by hardware logic and not
10068          * the offload processers, so make the chip do the pseudo-
10069          * header checksums on receive.  For transmit it is more
10070          * convenient to do the pseudo-header checksum in software
10071          * as Linux does that on transmit for us in all cases.
10072          */
10073         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10074
10075         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10076         if (tp->rxptpctl)
10077                 tw32(TG3_RX_PTP_CTL,
10078                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10079
10080         if (tg3_flag(tp, PTP_CAPABLE))
10081                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10082
10083         tw32(GRC_MODE, tp->grc_mode | val);
10084
10085         /* On one of the AMD platform, MRRS is restricted to 4000 because of
10086          * south bridge limitation. As a workaround, Driver is setting MRRS
10087          * to 2048 instead of default 4096.
10088          */
10089         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10090             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10091                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10092                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10093         }
10094
10095         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
10096         val = tr32(GRC_MISC_CFG);
10097         val &= ~0xff;
10098         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10099         tw32(GRC_MISC_CFG, val);
10100
10101         /* Initialize MBUF/DESC pool. */
10102         if (tg3_flag(tp, 5750_PLUS)) {
10103                 /* Do nothing.  */
10104         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10105                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10106                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10107                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10108                 else
10109                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10110                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10111                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10112         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10113                 int fw_len;
10114
10115                 fw_len = tp->fw_len;
10116                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10117                 tw32(BUFMGR_MB_POOL_ADDR,
10118                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10119                 tw32(BUFMGR_MB_POOL_SIZE,
10120                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10121         }
10122
10123         if (tp->dev->mtu <= ETH_DATA_LEN) {
10124                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10125                      tp->bufmgr_config.mbuf_read_dma_low_water);
10126                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10127                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10128                 tw32(BUFMGR_MB_HIGH_WATER,
10129                      tp->bufmgr_config.mbuf_high_water);
10130         } else {
10131                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10132                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10133                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10134                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10135                 tw32(BUFMGR_MB_HIGH_WATER,
10136                      tp->bufmgr_config.mbuf_high_water_jumbo);
10137         }
10138         tw32(BUFMGR_DMA_LOW_WATER,
10139              tp->bufmgr_config.dma_low_water);
10140         tw32(BUFMGR_DMA_HIGH_WATER,
10141              tp->bufmgr_config.dma_high_water);
10142
10143         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10144         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10145                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10146         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10147             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10148             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10149             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10150                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10151         tw32(BUFMGR_MODE, val);
10152         for (i = 0; i < 2000; i++) {
10153                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10154                         break;
10155                 udelay(10);
10156         }
10157         if (i >= 2000) {
10158                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10159                 return -ENODEV;
10160         }
10161
10162         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10163                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10164
10165         tg3_setup_rxbd_thresholds(tp);
10166
10167         /* Initialize TG3_BDINFO's at:
10168          *  RCVDBDI_STD_BD:     standard eth size rx ring
10169          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10170          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10171          *
10172          * like so:
10173          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10174          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10175          *                              ring attribute flags
10176          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10177          *
10178          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10179          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10180          *
10181          * The size of each ring is fixed in the firmware, but the location is
10182          * configurable.
10183          */
10184         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10185              ((u64) tpr->rx_std_mapping >> 32));
10186         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10187              ((u64) tpr->rx_std_mapping & 0xffffffff));
10188         if (!tg3_flag(tp, 5717_PLUS))
10189                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10190                      NIC_SRAM_RX_BUFFER_DESC);
10191
10192         /* Disable the mini ring */
10193         if (!tg3_flag(tp, 5705_PLUS))
10194                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10195                      BDINFO_FLAGS_DISABLED);
10196
10197         /* Program the jumbo buffer descriptor ring control
10198          * blocks on those devices that have them.
10199          */
10200         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10201             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10202
10203                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10204                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10205                              ((u64) tpr->rx_jmb_mapping >> 32));
10206                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10207                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10208                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10209                               BDINFO_FLAGS_MAXLEN_SHIFT;
10210                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10211                              val | BDINFO_FLAGS_USE_EXT_RECV);
10212                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10213                             tg3_flag(tp, 57765_CLASS) ||
10214                             tg3_asic_rev(tp) == ASIC_REV_5762)
10215                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10216                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10217                 } else {
10218                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10219                              BDINFO_FLAGS_DISABLED);
10220                 }
10221
10222                 if (tg3_flag(tp, 57765_PLUS)) {
10223                         val = TG3_RX_STD_RING_SIZE(tp);
10224                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10225                         val |= (TG3_RX_STD_DMA_SZ << 2);
10226                 } else
10227                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10228         } else
10229                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10230
10231         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10232
10233         tpr->rx_std_prod_idx = tp->rx_pending;
10234         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10235
10236         tpr->rx_jmb_prod_idx =
10237                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10238         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10239
10240         tg3_rings_reset(tp);
10241
10242         /* Initialize MAC address and backoff seed. */
10243         __tg3_set_mac_addr(tp, false);
10244
10245         /* MTU + ethernet header + FCS + optional VLAN tag */
10246         tw32(MAC_RX_MTU_SIZE,
10247              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10248
10249         /* The slot time is changed by tg3_setup_phy if we
10250          * run at gigabit with half duplex.
10251          */
10252         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10253               (6 << TX_LENGTHS_IPG_SHIFT) |
10254               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10255
10256         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10257             tg3_asic_rev(tp) == ASIC_REV_5762)
10258                 val |= tr32(MAC_TX_LENGTHS) &
10259                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10260                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10261
10262         tw32(MAC_TX_LENGTHS, val);
10263
10264         /* Receive rules. */
10265         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10266         tw32(RCVLPC_CONFIG, 0x0181);
10267
10268         /* Calculate RDMAC_MODE setting early, we need it to determine
10269          * the RCVLPC_STATE_ENABLE mask.
10270          */
10271         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10272                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10273                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10274                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10275                       RDMAC_MODE_LNGREAD_ENAB);
10276
10277         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10278                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10279
10280         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10281             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10282             tg3_asic_rev(tp) == ASIC_REV_57780)
10283                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10284                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10285                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10286
10287         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10288             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10289                 if (tg3_flag(tp, TSO_CAPABLE) &&
10290                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10291                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10292                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10293                            !tg3_flag(tp, IS_5788)) {
10294                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10295                 }
10296         }
10297
10298         if (tg3_flag(tp, PCI_EXPRESS))
10299                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10300
10301         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10302                 tp->dma_limit = 0;
10303                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10304                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10305                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10306                 }
10307         }
10308
10309         if (tg3_flag(tp, HW_TSO_1) ||
10310             tg3_flag(tp, HW_TSO_2) ||
10311             tg3_flag(tp, HW_TSO_3))
10312                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10313
10314         if (tg3_flag(tp, 57765_PLUS) ||
10315             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10316             tg3_asic_rev(tp) == ASIC_REV_57780)
10317                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10318
10319         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10320             tg3_asic_rev(tp) == ASIC_REV_5762)
10321                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10322
10323         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10324             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10325             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10326             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10327             tg3_flag(tp, 57765_PLUS)) {
10328                 u32 tgtreg;
10329
10330                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10331                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10332                 else
10333                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10334
10335                 val = tr32(tgtreg);
10336                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10337                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10338                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10339                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10340                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10341                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10342                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10343                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10344                 }
10345                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10346         }
10347
10348         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10349             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10350             tg3_asic_rev(tp) == ASIC_REV_5762) {
10351                 u32 tgtreg;
10352
10353                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10354                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10355                 else
10356                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10357
10358                 val = tr32(tgtreg);
10359                 tw32(tgtreg, val |
10360                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10361                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10362         }
10363
10364         /* Receive/send statistics. */
10365         if (tg3_flag(tp, 5750_PLUS)) {
10366                 val = tr32(RCVLPC_STATS_ENABLE);
10367                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10368                 tw32(RCVLPC_STATS_ENABLE, val);
10369         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10370                    tg3_flag(tp, TSO_CAPABLE)) {
10371                 val = tr32(RCVLPC_STATS_ENABLE);
10372                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10373                 tw32(RCVLPC_STATS_ENABLE, val);
10374         } else {
10375                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10376         }
10377         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10378         tw32(SNDDATAI_STATSENAB, 0xffffff);
10379         tw32(SNDDATAI_STATSCTRL,
10380              (SNDDATAI_SCTRL_ENABLE |
10381               SNDDATAI_SCTRL_FASTUPD));
10382
10383         /* Setup host coalescing engine. */
10384         tw32(HOSTCC_MODE, 0);
10385         for (i = 0; i < 2000; i++) {
10386                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10387                         break;
10388                 udelay(10);
10389         }
10390
10391         __tg3_set_coalesce(tp, &tp->coal);
10392
10393         if (!tg3_flag(tp, 5705_PLUS)) {
10394                 /* Status/statistics block address.  See tg3_timer,
10395                  * the tg3_periodic_fetch_stats call there, and
10396                  * tg3_get_stats to see how this works for 5705/5750 chips.
10397                  */
10398                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10399                      ((u64) tp->stats_mapping >> 32));
10400                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10401                      ((u64) tp->stats_mapping & 0xffffffff));
10402                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10403
10404                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10405
10406                 /* Clear statistics and status block memory areas */
10407                 for (i = NIC_SRAM_STATS_BLK;
10408                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10409                      i += sizeof(u32)) {
10410                         tg3_write_mem(tp, i, 0);
10411                         udelay(40);
10412                 }
10413         }
10414
10415         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10416
10417         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10418         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10419         if (!tg3_flag(tp, 5705_PLUS))
10420                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10421
10422         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10423                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10424                 /* reset to prevent losing 1st rx packet intermittently */
10425                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10426                 udelay(10);
10427         }
10428
10429         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10430                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10431                         MAC_MODE_FHDE_ENABLE;
10432         if (tg3_flag(tp, ENABLE_APE))
10433                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10434         if (!tg3_flag(tp, 5705_PLUS) &&
10435             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10436             tg3_asic_rev(tp) != ASIC_REV_5700)
10437                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10438         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10439         udelay(40);
10440
10441         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10442          * If TG3_FLAG_IS_NIC is zero, we should read the
10443          * register to preserve the GPIO settings for LOMs. The GPIOs,
10444          * whether used as inputs or outputs, are set by boot code after
10445          * reset.
10446          */
10447         if (!tg3_flag(tp, IS_NIC)) {
10448                 u32 gpio_mask;
10449
10450                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10451                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10452                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10453
10454                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10455                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10456                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10457
10458                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10459                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10460
10461                 tp->grc_local_ctrl &= ~gpio_mask;
10462                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10463
10464                 /* GPIO1 must be driven high for eeprom write protect */
10465                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10466                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10467                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10468         }
10469         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10470         udelay(100);
10471
10472         if (tg3_flag(tp, USING_MSIX)) {
10473                 val = tr32(MSGINT_MODE);
10474                 val |= MSGINT_MODE_ENABLE;
10475                 if (tp->irq_cnt > 1)
10476                         val |= MSGINT_MODE_MULTIVEC_EN;
10477                 if (!tg3_flag(tp, 1SHOT_MSI))
10478                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10479                 tw32(MSGINT_MODE, val);
10480         }
10481
10482         if (!tg3_flag(tp, 5705_PLUS)) {
10483                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10484                 udelay(40);
10485         }
10486
10487         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10488                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10489                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10490                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10491                WDMAC_MODE_LNGREAD_ENAB);
10492
10493         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10494             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10495                 if (tg3_flag(tp, TSO_CAPABLE) &&
10496                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10497                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10498                         /* nothing */
10499                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10500                            !tg3_flag(tp, IS_5788)) {
10501                         val |= WDMAC_MODE_RX_ACCEL;
10502                 }
10503         }
10504
10505         /* Enable host coalescing bug fix */
10506         if (tg3_flag(tp, 5755_PLUS))
10507                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10508
10509         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10510                 val |= WDMAC_MODE_BURST_ALL_DATA;
10511
10512         tw32_f(WDMAC_MODE, val);
10513         udelay(40);
10514
10515         if (tg3_flag(tp, PCIX_MODE)) {
10516                 u16 pcix_cmd;
10517
10518                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10519                                      &pcix_cmd);
10520                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10521                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10522                         pcix_cmd |= PCI_X_CMD_READ_2K;
10523                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10524                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10525                         pcix_cmd |= PCI_X_CMD_READ_2K;
10526                 }
10527                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10528                                       pcix_cmd);
10529         }
10530
10531         tw32_f(RDMAC_MODE, rdmac_mode);
10532         udelay(40);
10533
10534         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10535             tg3_asic_rev(tp) == ASIC_REV_5720) {
10536                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10537                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10538                                 break;
10539                 }
10540                 if (i < TG3_NUM_RDMA_CHANNELS) {
10541                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10542                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10543                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10544                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10545                 }
10546         }
10547
10548         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10549         if (!tg3_flag(tp, 5705_PLUS))
10550                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10551
10552         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10553                 tw32(SNDDATAC_MODE,
10554                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10555         else
10556                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10557
10558         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10559         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10560         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10561         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10562                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10563         tw32(RCVDBDI_MODE, val);
10564         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10565         if (tg3_flag(tp, HW_TSO_1) ||
10566             tg3_flag(tp, HW_TSO_2) ||
10567             tg3_flag(tp, HW_TSO_3))
10568                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10569         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10570         if (tg3_flag(tp, ENABLE_TSS))
10571                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10572         tw32(SNDBDI_MODE, val);
10573         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10574
10575         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10576                 err = tg3_load_5701_a0_firmware_fix(tp);
10577                 if (err)
10578                         return err;
10579         }
10580
10581         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10582                 /* Ignore any errors for the firmware download. If download
10583                  * fails, the device will operate with EEE disabled
10584                  */
10585                 tg3_load_57766_firmware(tp);
10586         }
10587
10588         if (tg3_flag(tp, TSO_CAPABLE)) {
10589                 err = tg3_load_tso_firmware(tp);
10590                 if (err)
10591                         return err;
10592         }
10593
10594         tp->tx_mode = TX_MODE_ENABLE;
10595
10596         if (tg3_flag(tp, 5755_PLUS) ||
10597             tg3_asic_rev(tp) == ASIC_REV_5906)
10598                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10599
10600         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10601             tg3_asic_rev(tp) == ASIC_REV_5762) {
10602                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10603                 tp->tx_mode &= ~val;
10604                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10605         }
10606
10607         tw32_f(MAC_TX_MODE, tp->tx_mode);
10608         udelay(100);
10609
10610         if (tg3_flag(tp, ENABLE_RSS)) {
10611                 u32 rss_key[10];
10612
10613                 tg3_rss_write_indir_tbl(tp);
10614
10615                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10616
10617                 for (i = 0; i < 10 ; i++)
10618                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10619         }
10620
10621         tp->rx_mode = RX_MODE_ENABLE;
10622         if (tg3_flag(tp, 5755_PLUS))
10623                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10624
10625         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10626                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10627
10628         if (tg3_flag(tp, ENABLE_RSS))
10629                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10630                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10631                                RX_MODE_RSS_IPV6_HASH_EN |
10632                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10633                                RX_MODE_RSS_IPV4_HASH_EN |
10634                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10635
10636         tw32_f(MAC_RX_MODE, tp->rx_mode);
10637         udelay(10);
10638
10639         tw32(MAC_LED_CTRL, tp->led_ctrl);
10640
10641         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10642         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10643                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10644                 udelay(10);
10645         }
10646         tw32_f(MAC_RX_MODE, tp->rx_mode);
10647         udelay(10);
10648
10649         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10650                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10651                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10652                         /* Set drive transmission level to 1.2V  */
10653                         /* only if the signal pre-emphasis bit is not set  */
10654                         val = tr32(MAC_SERDES_CFG);
10655                         val &= 0xfffff000;
10656                         val |= 0x880;
10657                         tw32(MAC_SERDES_CFG, val);
10658                 }
10659                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10660                         tw32(MAC_SERDES_CFG, 0x616000);
10661         }
10662
10663         /* Prevent chip from dropping frames when flow control
10664          * is enabled.
10665          */
10666         if (tg3_flag(tp, 57765_CLASS))
10667                 val = 1;
10668         else
10669                 val = 2;
10670         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10671
10672         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10673             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10674                 /* Use hardware link auto-negotiation */
10675                 tg3_flag_set(tp, HW_AUTONEG);
10676         }
10677
10678         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10679             tg3_asic_rev(tp) == ASIC_REV_5714) {
10680                 u32 tmp;
10681
10682                 tmp = tr32(SERDES_RX_CTRL);
10683                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10684                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10685                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10686                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10687         }
10688
10689         if (!tg3_flag(tp, USE_PHYLIB)) {
10690                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10691                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10692
10693                 err = tg3_setup_phy(tp, false);
10694                 if (err)
10695                         return err;
10696
10697                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10698                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10699                         u32 tmp;
10700
10701                         /* Clear CRC stats. */
10702                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10703                                 tg3_writephy(tp, MII_TG3_TEST1,
10704                                              tmp | MII_TG3_TEST1_CRC_EN);
10705                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10706                         }
10707                 }
10708         }
10709
10710         __tg3_set_rx_mode(tp->dev);
10711
10712         /* Initialize receive rules. */
10713         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10714         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10715         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10716         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10717
10718         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10719                 limit = 8;
10720         else
10721                 limit = 16;
10722         if (tg3_flag(tp, ENABLE_ASF))
10723                 limit -= 4;
10724         switch (limit) {
10725         case 16:
10726                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10727                 /* fall through */
10728         case 15:
10729                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10730                 /* fall through */
10731         case 14:
10732                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10733                 /* fall through */
10734         case 13:
10735                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10736                 /* fall through */
10737         case 12:
10738                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10739                 /* fall through */
10740         case 11:
10741                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10742                 /* fall through */
10743         case 10:
10744                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10745                 /* fall through */
10746         case 9:
10747                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10748                 /* fall through */
10749         case 8:
10750                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10751                 /* fall through */
10752         case 7:
10753                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10754                 /* fall through */
10755         case 6:
10756                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10757                 /* fall through */
10758         case 5:
10759                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10760                 /* fall through */
10761         case 4:
10762                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10763         case 3:
10764                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10765         case 2:
10766         case 1:
10767
10768         default:
10769                 break;
10770         }
10771
10772         if (tg3_flag(tp, ENABLE_APE))
10773                 /* Write our heartbeat update interval to APE. */
10774                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10775                                 APE_HOST_HEARTBEAT_INT_5SEC);
10776
10777         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10778
10779         return 0;
10780 }
10781
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative error code propagated from
 * tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
        /* Chip may have been just powered on. If so, the boot code may still
         * be running initialization. Wait for it to finish to avoid races in
         * accessing the hardware.
         */
        tg3_enable_register_access(tp);
        tg3_poll_fw(tp);

        tg3_switch_clocks(tp);

        /* Point the on-chip memory window back at the start of NIC SRAM
         * before the full hardware setup below.
         */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        return tg3_reset_hw(tp, reset_phy);
}
10800
10801 #ifdef CONFIG_TIGON3_HWMON
10802 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10803 {
10804         int i;
10805
10806         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10807                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10808
10809                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10810                 off += len;
10811
10812                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10813                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10814                         memset(ocir, 0, TG3_OCIR_LEN);
10815         }
10816 }
10817
10818 /* sysfs attributes for hwmon */
10819 static ssize_t tg3_show_temp(struct device *dev,
10820                              struct device_attribute *devattr, char *buf)
10821 {
10822         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10823         struct tg3 *tp = dev_get_drvdata(dev);
10824         u32 temperature;
10825
10826         spin_lock_bh(&tp->lock);
10827         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10828                                 sizeof(temperature));
10829         spin_unlock_bh(&tp->lock);
10830         return sprintf(buf, "%u\n", temperature * 1000);
10831 }
10832
10833
/* hwmon sensor attributes.  The final SENSOR_DEVICE_ATTR argument is the
 * APE scratchpad offset that tg3_show_temp() passes to
 * tg3_ape_scratchpad_read() as attr->index.
 */
static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
                          TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
                          TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
                          TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        NULL    /* sysfs attribute arrays are NULL-terminated */
};
/* Generates tg3_groups, used by hwmon_device_register_with_groups(). */
ATTRIBUTE_GROUPS(tg3);
10848
10849 static void tg3_hwmon_close(struct tg3 *tp)
10850 {
10851         if (tp->hwmon_dev) {
10852                 hwmon_device_unregister(tp->hwmon_dev);
10853                 tp->hwmon_dev = NULL;
10854         }
10855 }
10856
10857 static void tg3_hwmon_open(struct tg3 *tp)
10858 {
10859         int i;
10860         u32 size = 0;
10861         struct pci_dev *pdev = tp->pdev;
10862         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10863
10864         tg3_sd_scan_scratchpad(tp, ocirs);
10865
10866         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10867                 if (!ocirs[i].src_data_length)
10868                         continue;
10869
10870                 size += ocirs[i].src_hdr_length;
10871                 size += ocirs[i].src_data_length;
10872         }
10873
10874         if (!size)
10875                 return;
10876
10877         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10878                                                           tp, tg3_groups);
10879         if (IS_ERR(tp->hwmon_dev)) {
10880                 tp->hwmon_dev = NULL;
10881                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10882         }
10883 }
10884 #else
/* CONFIG_TIGON3_HWMON disabled: no-op stubs so callers need no ifdefs. */
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
10887 #endif /* CONFIG_TIGON3_HWMON */
10888
10889
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * (high/low pair) statistics slot PSTAT.  REG is read exactly once;
 * if the unsigned add wraps, the new low word is smaller than the
 * value just added, and the carry is propagated into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
10896
/* Fold the MAC's 32-bit hardware statistics counters into the 64-bit
 * software copies in tp->hw_stats via TG3_STAT_ADD32.  Called from the
 * driver timer; does nothing while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        if (!tp->link_up)
                return;

        /* Transmit-side counters. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
        /* Once enough frames have been transmitted, clear the 5719/5720
         * RDMA-length workaround bit that tg3_reset_hw() set, and drop
         * the corresponding flag.
         */
        if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
                     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
                      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
                u32 val;

                val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
                val &= ~tg3_lso_rd_dma_workaround_bit(tp);
                tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
                tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
        }

        /* Receive-side counters. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
            tg3_asic_rev(tp) != ASIC_REV_5762 &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
                TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        } else {
                /* On these chips, each timer tick that finds the mbuf
                 * low-watermark attention set is counted as one discard,
                 * and the attention bit is written back (presumably
                 * write-one-to-clear -- confirm against chip docs) so the
                 * next occurrence can be observed.
                 */
                u32 val = tr32(HOSTCC_FLOW_ATTN);
                val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
                if (val) {
                        tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
                        sp->rx_discards.low += val;
                        if (sp->rx_discards.low < val)
                                sp->rx_discards.high += 1;
                }
                sp->mbuf_lwm_thresh_hit = sp->rx_discards;
        }
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10962
10963 static void tg3_chk_missed_msi(struct tg3 *tp)
10964 {
10965         u32 i;
10966
10967         for (i = 0; i < tp->irq_cnt; i++) {
10968                 struct tg3_napi *tnapi = &tp->napi[i];
10969
10970                 if (tg3_has_work(tnapi)) {
10971                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10972                             tnapi->last_tx_cons == tnapi->tx_cons) {
10973                                 if (tnapi->chk_msi_cnt < 1) {
10974                                         tnapi->chk_msi_cnt++;
10975                                         return;
10976                                 }
10977                                 tg3_msi(0, tnapi);
10978                         }
10979                 }
10980                 tnapi->chk_msi_cnt = 0;
10981                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10982                 tnapi->last_tx_cons = tnapi->tx_cons;
10983         }
10984 }
10985
10986 static void tg3_timer(struct timer_list *t)
10987 {
10988         struct tg3 *tp = from_timer(tp, t, timer);
10989
10990         spin_lock(&tp->lock);
10991
10992         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10993                 spin_unlock(&tp->lock);
10994                 goto restart_timer;
10995         }
10996
10997         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10998             tg3_flag(tp, 57765_CLASS))
10999                 tg3_chk_missed_msi(tp);
11000
11001         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11002                 /* BCM4785: Flush posted writes from GbE to host memory. */
11003                 tr32(HOSTCC_MODE);
11004         }
11005
11006         if (!tg3_flag(tp, TAGGED_STATUS)) {
11007                 /* All of this garbage is because when using non-tagged
11008                  * IRQ status the mailbox/status_block protocol the chip
11009                  * uses with the cpu is race prone.
11010                  */
11011                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11012                         tw32(GRC_LOCAL_CTRL,
11013                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11014                 } else {
11015                         tw32(HOSTCC_MODE, tp->coalesce_mode |
11016                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11017                 }
11018
11019                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11020                         spin_unlock(&tp->lock);
11021                         tg3_reset_task_schedule(tp);
11022                         goto restart_timer;
11023                 }
11024         }
11025
11026         /* This part only runs once per second. */
11027         if (!--tp->timer_counter) {
11028                 if (tg3_flag(tp, 5705_PLUS))
11029                         tg3_periodic_fetch_stats(tp);
11030
11031                 if (tp->setlpicnt && !--tp->setlpicnt)
11032                         tg3_phy_eee_enable(tp);
11033
11034                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11035                         u32 mac_stat;
11036                         int phy_event;
11037
11038                         mac_stat = tr32(MAC_STATUS);
11039
11040                         phy_event = 0;
11041                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11042                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11043                                         phy_event = 1;
11044                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11045                                 phy_event = 1;
11046
11047                         if (phy_event)
11048                                 tg3_setup_phy(tp, false);
11049                 } else if (tg3_flag(tp, POLL_SERDES)) {
11050                         u32 mac_stat = tr32(MAC_STATUS);
11051                         int need_setup = 0;
11052
11053                         if (tp->link_up &&
11054                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11055                                 need_setup = 1;
11056                         }
11057                         if (!tp->link_up &&
11058                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11059                                          MAC_STATUS_SIGNAL_DET))) {
11060                                 need_setup = 1;
11061                         }
11062                         if (need_setup) {
11063                                 if (!tp->serdes_counter) {
11064                                         tw32_f(MAC_MODE,
11065                                              (tp->mac_mode &
11066                                               ~MAC_MODE_PORT_MODE_MASK));
11067                                         udelay(40);
11068                                         tw32_f(MAC_MODE, tp->mac_mode);
11069                                         udelay(40);
11070                                 }
11071                                 tg3_setup_phy(tp, false);
11072                         }
11073                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11074                            tg3_flag(tp, 5780_CLASS)) {
11075                         tg3_serdes_parallel_detect(tp);
11076                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11077                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11078                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11079                                          TG3_CPMU_STATUS_LINK_MASK);
11080
11081                         if (link_up != tp->link_up)
11082                                 tg3_setup_phy(tp, false);
11083                 }
11084
11085                 tp->timer_counter = tp->timer_multiplier;
11086         }
11087
11088         /* Heartbeat is only sent once every 2 seconds.
11089          *
11090          * The heartbeat is to tell the ASF firmware that the host
11091          * driver is still alive.  In the event that the OS crashes,
11092          * ASF needs to reset the hardware to free up the FIFO space
11093          * that may be filled with rx packets destined for the host.
11094          * If the FIFO is full, ASF will no longer function properly.
11095          *
11096          * Unintended resets have been reported on real time kernels
11097          * where the timer doesn't run on time.  Netpoll will also have
11098          * same problem.
11099          *
11100          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11101          * to check the ring condition when the heartbeat is expiring
11102          * before doing the reset.  This will prevent most unintended
11103          * resets.
11104          */
11105         if (!--tp->asf_counter) {
11106                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11107                         tg3_wait_for_event_ack(tp);
11108
11109                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11110                                       FWCMD_NICDRV_ALIVE3);
11111                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11112                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11113                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11114
11115                         tg3_generate_fw_event(tp);
11116                 }
11117                 tp->asf_counter = tp->asf_multiplier;
11118         }
11119
11120         /* Update the APE heartbeat every 5 seconds.*/
11121         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11122
11123         spin_unlock(&tp->lock);
11124
11125 restart_timer:
11126         tp->timer.expires = jiffies + tp->timer_offset;
11127         add_timer(&tp->timer);
11128 }
11129
11130 static void tg3_timer_init(struct tg3 *tp)
11131 {
11132         if (tg3_flag(tp, TAGGED_STATUS) &&
11133             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11134             !tg3_flag(tp, 57765_CLASS))
11135                 tp->timer_offset = HZ;
11136         else
11137                 tp->timer_offset = HZ / 10;
11138
11139         BUG_ON(tp->timer_offset > HZ);
11140
11141         tp->timer_multiplier = (HZ / tp->timer_offset);
11142         tp->asf_multiplier = (HZ / tp->timer_offset) *
11143                              TG3_FW_UPDATE_FREQ_SEC;
11144
11145         timer_setup(&tp->timer, tg3_timer, 0);
11146 }
11147
11148 static void tg3_timer_start(struct tg3 *tp)
11149 {
11150         tp->asf_counter   = tp->asf_multiplier;
11151         tp->timer_counter = tp->timer_multiplier;
11152
11153         tp->timer.expires = jiffies + tp->timer_offset;
11154         add_timer(&tp->timer);
11155 }
11156
/* Stop the periodic timer, waiting for a concurrently running handler
 * to finish before returning.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
11161
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the device is halted and closed;
 * note that tp->lock is dropped and reacquired around the close path,
 * as the sparse annotations below advertise.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() cannot run under tp->lock: drop it, tear
		 * down the timer/NAPI state, close the device, then
		 * retake the lock so the caller's locking assumptions
		 * still hold on return.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
11185
/* Process-context worker that halts and fully re-initializes the chip
 * after a fatal error.  Scheduled via tg3_reset_task_schedule(); the
 * RESET_TASK_PENDING flag is cleared on every exit path.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface went down in the meantime. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	/* Quiesce the PHY and net traffic outside tp->lock. */
	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* A TX recovery is pending; switch the mailbox writers
		 * to the flushed variants (the hang may have involved
		 * reordered mailbox writes — hence MBOX_WRITE_REORDER)
		 * before restarting the hardware.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
	rtnl_unlock();
}
11232
11233 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11234 {
11235         irq_handler_t fn;
11236         unsigned long flags;
11237         char *name;
11238         struct tg3_napi *tnapi = &tp->napi[irq_num];
11239
11240         if (tp->irq_cnt == 1)
11241                 name = tp->dev->name;
11242         else {
11243                 name = &tnapi->irq_lbl[0];
11244                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11245                         snprintf(name, IFNAMSIZ,
11246                                  "%s-txrx-%d", tp->dev->name, irq_num);
11247                 else if (tnapi->tx_buffers)
11248                         snprintf(name, IFNAMSIZ,
11249                                  "%s-tx-%d", tp->dev->name, irq_num);
11250                 else if (tnapi->rx_rcb)
11251                         snprintf(name, IFNAMSIZ,
11252                                  "%s-rx-%d", tp->dev->name, irq_num);
11253                 else
11254                         snprintf(name, IFNAMSIZ,
11255                                  "%s-%d", tp->dev->name, irq_num);
11256                 name[IFNAMSIZ-1] = 0;
11257         }
11258
11259         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11260                 fn = tg3_msi;
11261                 if (tg3_flag(tp, 1SHOT_MSI))
11262                         fn = tg3_msi_1shot;
11263                 flags = 0;
11264         } else {
11265                 fn = tg3_interrupt;
11266                 if (tg3_flag(tp, TAGGED_STATUS))
11267                         fn = tg3_interrupt_tagged;
11268                 flags = IRQF_SHARED;
11269         }
11270
11271         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11272 }
11273
/* Verify that the chip can actually deliver an interrupt to the host.
 * Temporarily swaps the ISR of vector 0 for tg3_test_isr, forces an
 * interrupt through the host coalescing engine, and polls for evidence
 * of delivery.  The real handler is restored before returning.
 * Returns 0 on success, -EIO if no interrupt was observed, or a
 * negative errno from request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine to raise an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to ~50 ms (5 x 10 ms) for the interrupt to show up
	 * either as a nonzero mailbox value or as a masked PCI INT.
	 */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* 57765+: acknowledge a new status tag so the chip can
		 * raise the next interrupt.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Restore the real ISR regardless of the test outcome. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
11347
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * Any other failure (including a failed INTx fallback) is returned as
 * a negative errno.  A no-op when MSI is not in use.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	/* Re-request vector 0 on the legacy INTx line. */
	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
11408
11409 static int tg3_request_firmware(struct tg3 *tp)
11410 {
11411         const struct tg3_firmware_hdr *fw_hdr;
11412
11413         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11414                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11415                            tp->fw_needed);
11416                 return -ENOENT;
11417         }
11418
11419         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11420
11421         /* Firmware blob starts with version numbers, followed by
11422          * start address and _full_ length including BSS sections
11423          * (which must be longer than the actual data, of course
11424          */
11425
11426         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11427         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11428                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11429                            tp->fw_len, tp->fw_needed);
11430                 release_firmware(tp->fw);
11431                 tp->fw = NULL;
11432                 return -EINVAL;
11433         }
11434
11435         /* We no longer need firmware; we have it. */
11436         tp->fw_needed = NULL;
11437         return 0;
11438 }
11439
11440 static u32 tg3_irq_count(struct tg3 *tp)
11441 {
11442         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11443
11444         if (irq_cnt > 1) {
11445                 /* We want as many rx rings enabled as there are cpus.
11446                  * In multiqueue MSI-X mode, the first MSI-X vector
11447                  * only deals with link interrupts, etc, so we add
11448                  * one to the number of vectors we are requesting.
11449                  */
11450                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11451         }
11452
11453         return irq_cnt;
11454 }
11455
/* Try to put the device into MSI-X mode.  Computes the desired rx/tx
 * queue counts, requests one vector per queue (plus one for link
 * interrupts in multiqueue mode), and degrades gracefully if fewer
 * vectors are granted.  Returns true when MSI-X is in use, false to
 * let the caller fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	/* Start from the user-requested queue counts; default the rx
	 * count from the RSS heuristic and clamp to the chip maximum.
	 */
	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	/* Accept anywhere from 1 to irq_cnt vectors. */
	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		/* Fewer vectors than requested: shrink the queue counts
		 * to match, keeping vector 0 for link interrupts.
		 */
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* A single vector is still MSI-X, but without RSS/TSS. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
11514
/* Select and program the interrupt mode, trying MSI-X first, then MSI,
 * then legacy INTx.  Also normalizes the irq/queue counts for the
 * single-vector case.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* One-shot mode stays enabled only when 1SHOT_MSI is
		 * set; otherwise disable it in hardware.
		 */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	/* Anything other than MSI-X uses exactly one vector: the
	 * device's legacy/MSI irq line.
	 */
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		/* A single vector implies single TX/RX queues. */
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
11553
11554 static void tg3_ints_fini(struct tg3 *tp)
11555 {
11556         if (tg3_flag(tp, USING_MSIX))
11557                 pci_disable_msix(tp->pdev);
11558         else if (tg3_flag(tp, USING_MSI))
11559                 pci_disable_msi(tp->pdev);
11560         tg3_flag_clear(tp, USING_MSI);
11561         tg3_flag_clear(tp, USING_MSIX);
11562         tg3_flag_clear(tp, ENABLE_RSS);
11563         tg3_flag_clear(tp, ENABLE_TSS);
11564 }
11565
/* Bring the device fully up: choose interrupt vectors, allocate ring
 * and status-block memory, initialize the hardware (optionally
 * resetting the PHY and verifying MSI delivery), then start the timer,
 * PHY, and TX queues.  Returns 0 on success or a negative errno after
 * unwinding through the out_* labels.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	/* Request one IRQ per vector, unwinding the ones already taken
	 * if any request fails.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				struct tg3_napi *tnapi = &tp->napi[i];

				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		/* Verify MSI delivery; tg3_test_msi() attempts an INTx
		 * fallback itself and returns nonzero only when the
		 * interrupt setup cannot be recovered.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
11680
/* Inverse of tg3_start(): quiesce the reset worker, net traffic, timer
 * and PHY, halt the chip, then free rings, interrupt vectors, NAPI
 * contexts, and DMA-consistent memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Release vectors in reverse of the order tg3_start() took them. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11715
/* net_device_ops .ndo_open handler.  Loads firmware when required,
 * powers the chip up, and brings the interface up via tg3_start().
 * A firmware load failure is fatal only on 5701 A0; on 57766 it just
 * disables EEE, and on other chips it disables TSO.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			/* 57766: firmware gates the EEE capability. */
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware. */
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Reset the PHY during start unless the link must be kept up
	 * across power-down.
	 */
	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		/* Undo the power-up on failure. */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	return err;
}
11772
/* net_device_ops .ndo_close handler.  Refuses to run during PCI error
 * recovery; otherwise stops the device and, if it is still physically
 * present, runs the power-down preparation and drops the carrier.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to close device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	tg3_stop(tp);

	/* Skip hardware touch-up if the device has gone away (e.g.
	 * surprise removal).
	 */
	if (pci_device_is_present(tp->pdev)) {
		tg3_power_down_prepare(tp);

		tg3_carrier_off(tp);
	}
	return 0;
}
11792
11793 static inline u64 get_stat64(tg3_stat64_t *val)
11794 {
11795        return ((u64)val->high << 32) | ((u64)val->low);
11796 }
11797
/* Return the cumulative RX CRC error count.  On 5700/5701 with a
 * non-SerDes PHY the count comes from the PHY's TEST1/RXR counter
 * registers and is accumulated in tp->phy_crc_errors (the read-then-
 * accumulate pattern suggests the PHY counter clears on read — verify
 * against the PHY datasheet); all other chips use the MAC's
 * rx_fcs_errors hardware statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then read it. */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11821
/* Helper for tg3_get_estats(): estats->member = previously saved value
 * plus the current hardware counter.  Relies on old_estats, estats and
 * hw_stats being in scope at the expansion site.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
11825
/* Fill *estats with cumulative ethtool statistics: the snapshot saved
 * in tp->estats_prev plus the current hardware counter block.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Receive-side counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* DMA write / receive-queue counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* DMA read / completion-queue counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing / interrupt counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11909
11910 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11911 {
11912         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11913         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11914
11915         stats->rx_packets = old_stats->rx_packets +
11916                 get_stat64(&hw_stats->rx_ucast_packets) +
11917                 get_stat64(&hw_stats->rx_mcast_packets) +
11918                 get_stat64(&hw_stats->rx_bcast_packets);
11919
11920         stats->tx_packets = old_stats->tx_packets +
11921                 get_stat64(&hw_stats->tx_ucast_packets) +
11922                 get_stat64(&hw_stats->tx_mcast_packets) +
11923                 get_stat64(&hw_stats->tx_bcast_packets);
11924
11925         stats->rx_bytes = old_stats->rx_bytes +
11926                 get_stat64(&hw_stats->rx_octets);
11927         stats->tx_bytes = old_stats->tx_bytes +
11928                 get_stat64(&hw_stats->tx_octets);
11929
11930         stats->rx_errors = old_stats->rx_errors +
11931                 get_stat64(&hw_stats->rx_errors);
11932         stats->tx_errors = old_stats->tx_errors +
11933                 get_stat64(&hw_stats->tx_errors) +
11934                 get_stat64(&hw_stats->tx_mac_errors) +
11935                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11936                 get_stat64(&hw_stats->tx_discards);
11937
11938         stats->multicast = old_stats->multicast +
11939                 get_stat64(&hw_stats->rx_mcast_packets);
11940         stats->collisions = old_stats->collisions +
11941                 get_stat64(&hw_stats->tx_collisions);
11942
11943         stats->rx_length_errors = old_stats->rx_length_errors +
11944                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11945                 get_stat64(&hw_stats->rx_undersize_packets);
11946
11947         stats->rx_frame_errors = old_stats->rx_frame_errors +
11948                 get_stat64(&hw_stats->rx_align_errors);
11949         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11950                 get_stat64(&hw_stats->tx_discards);
11951         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11952                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11953
11954         stats->rx_crc_errors = old_stats->rx_crc_errors +
11955                 tg3_calc_crc_errors(tp);
11956
11957         stats->rx_missed_errors = old_stats->rx_missed_errors +
11958                 get_stat64(&hw_stats->rx_discards);
11959
11960         stats->rx_dropped = tp->rx_dropped;
11961         stats->tx_dropped = tp->tx_dropped;
11962 }
11963
/* ethtool get_regs_len: size in bytes of the buffer tg3_get_regs() fills. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11968
/* ethtool get_regs: dump the legacy register block into @_p.
 *
 * The buffer is zeroed up front, so any register that is not dumped
 * reads back as 0.  While the PHY is in low-power mode no registers
 * are read at all and the caller simply gets the zeroed buffer.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	/* Take the full lock so the dump is a consistent snapshot. */
	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11987
/* ethtool get_eeprom_len: size of the device NVRAM, probed at init time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
11994
/* ethtool get_eeprom: read @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @data.
 *
 * NVRAM is accessed as big-endian 32-bit words, so an unaligned head
 * or tail is handled by reading the enclosing word and copying out the
 * relevant bytes.  eeprom->len is rewritten to the number of bytes
 * actually produced, including on early exit.
 *
 * Returns 0 on success, -EINVAL if the device has no NVRAM, -EINTR if
 * a signal arrived mid-read, or the error from tg3_nvram_read_be32().
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					     CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* NOTE(review): on failure this reports i-4 bytes
			 * even though the word at i-4 was copied OK —
			 * presumably deliberately conservative; confirm.
			 */
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		/* Long reads: yield the CPU periodically and bail out
		 * early if the caller has been signalled.
		 */
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}
12085
/* ethtool set_eeprom: write @eeprom->len bytes from @data to NVRAM at
 * @eeprom->offset.
 *
 * NVRAM writes must be whole 32-bit words, so an unaligned request is
 * widened: the partial words at the start (@start) and end (@end) are
 * read back first and merged with the caller's data in a temporary
 * buffer, which is then written as one aligned block.
 *
 * Returns 0 on success, -EINVAL on bad magic or missing NVRAM,
 * -ENOMEM on allocation failure, or the NVRAM read/write error.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start = 0, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge preserved head/tail words around the new data. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
12141
/* ethtool get_link_ksettings: report supported/advertised link modes,
 * port type, and — when the link is up — the active speed/duplex and
 * link-partner advertisement.
 *
 * When phylib manages the PHY (USE_PHYLIB) the query is delegated to
 * phy_ethtool_ksettings_get(); otherwise the answer is synthesized
 * from tp->phy_flags and tp->link_config.
 *
 * Returns 0, or -EAGAIN if the phylib PHY is not yet connected.
 */
static int tg3_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 supported, advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		phy_ethtool_ksettings_get(phydev, cmd);

		return 0;
	}

	supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		supported |= (SUPPORTED_1000baseT_Half |
			      SUPPORTED_1000baseT_Full);

	/* Serdes devices report fibre; everything else is twisted pair. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		supported |= (SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_TP);
		cmd->base.port = PORT_TP;
	} else {
		supported |= SUPPORTED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				advertising |= ADVERTISED_Pause;
			} else {
				advertising |= ADVERTISED_Pause |
					ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			advertising |= ADVERTISED_Asym_Pause;
		}
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	if (netif_running(dev) && tp->link_up) {
		cmd->base.speed = tp->link_config.active_speed;
		cmd->base.duplex = tp->link_config.active_duplex;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising,
			tp->link_config.rmt_adv);

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->base.phy_address = tp->phy_addr;
	cmd->base.autoneg = tp->link_config.autoneg;
	return 0;
}
12216
/* ethtool set_link_ksettings: validate and apply autoneg/speed/duplex.
 *
 * With phylib (USE_PHYLIB) the request is forwarded to
 * phy_ethtool_ksettings_set().  Otherwise the requested modes are
 * validated against what tp->phy_flags says the hardware supports,
 * written to tp->link_config under the full lock, and — if the
 * interface is up — the PHY is reprogrammed via tg3_setup_phy().
 *
 * Returns 0 on success, -EINVAL on an unsupported combination, or
 * -EAGAIN if the phylib PHY is not yet connected.
 */
static int tg3_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_set(phydev, cmd);
	}

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    cmd->base.duplex != DUPLEX_FULL &&
	    cmd->base.duplex != DUPLEX_HALF)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		/* Build the mask of modes this device can advertise... */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* ...reject anything outside it... */
		if (advertising & ~mask)
			return -EINVAL;

		/* ...then keep only the speed/duplex bits of the request. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		advertising &= mask;
	} else {
		/* Forced mode: serdes only supports 1000/full; copper
		 * forced mode only supports 10 or 100.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->base.duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->base.autoneg;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->base.duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
12312
/* ethtool get_drvinfo: fill in driver name/version, firmware version,
 * and PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
12322
12323 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12324 {
12325         struct tg3 *tp = netdev_priv(dev);
12326
12327         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12328                 wol->supported = WAKE_MAGIC;
12329         else
12330                 wol->supported = 0;
12331         wol->wolopts = 0;
12332         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12333                 wol->wolopts = WAKE_MAGIC;
12334         memset(&wol->sopass, 0, sizeof(wol->sopass));
12335 }
12336
12337 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12338 {
12339         struct tg3 *tp = netdev_priv(dev);
12340         struct device *dp = &tp->pdev->dev;
12341
12342         if (wol->wolopts & ~WAKE_MAGIC)
12343                 return -EINVAL;
12344         if ((wol->wolopts & WAKE_MAGIC) &&
12345             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12346                 return -EINVAL;
12347
12348         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12349
12350         if (device_may_wakeup(dp))
12351                 tg3_flag_set(tp, WOL_ENABLE);
12352         else
12353                 tg3_flag_clear(tp, WOL_ENABLE);
12354
12355         return 0;
12356 }
12357
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
12363
/* ethtool set_msglevel: set the driver's message-enable bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
12369
/* ethtool nway_reset: restart link autonegotiation.
 *
 * With phylib this is phy_start_aneg(); otherwise BMCR is read and,
 * if autoneg is enabled (or parallel detect is active), rewritten with
 * BMCR_ANRESTART under tp->lock.
 *
 * Returns 0 on success, -EAGAIN if the device is down or the phylib
 * PHY is not connected, -EINVAL for serdes PHYs or when autoneg is
 * not enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* BMCR is read twice and the first result discarded —
		 * presumably to flush a stale latched value; confirm
		 * before "simplifying" this to a single read.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
12405
12406 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12407 {
12408         struct tg3 *tp = netdev_priv(dev);
12409
12410         ering->rx_max_pending = tp->rx_std_ring_mask;
12411         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12412                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12413         else
12414                 ering->rx_jumbo_max_pending = 0;
12415
12416         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12417
12418         ering->rx_pending = tp->rx_pending;
12419         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12420                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12421         else
12422                 ering->rx_jumbo_pending = 0;
12423
12424         ering->tx_pending = tp->napi[0].tx_pending;
12425 }
12426
/* ethtool set_ringparam: resize the RX/TX rings.
 *
 * Validates the requested sizes against the hardware limits, then —
 * if the interface is up — stops the device, applies the new sizes to
 * tp and every NAPI TX ring, and restarts the hardware.  On 5717/5719/
 * 5720 the PHY is also reset to avoid a PHY lock up (see comment).
 *
 * Returns 0 on success, -EINVAL for out-of-range sizes, or the error
 * from tg3_restart_hw().
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;
	bool reset_phy = false;

	/* TX must hold at least one max-fragmented skb (3x with TSO_BUG). */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Reset PHY to avoid PHY lock up */
		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			reset_phy = true;

		err = tg3_restart_hw(tp, reset_phy);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
12481
12482 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12483 {
12484         struct tg3 *tp = netdev_priv(dev);
12485
12486         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12487
12488         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12489                 epause->rx_pause = 1;
12490         else
12491                 epause->rx_pause = 0;
12492
12493         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12494                 epause->tx_pause = 1;
12495         else
12496                 epause->tx_pause = 0;
12497 }
12498
/* ethtool set_pauseparam: configure RX/TX flow control.
 *
 * With phylib, the pause request is validated and pushed to the PHY via
 * phy_set_asym_pause(); if the PHY is autonegotiating, the renegotiation
 * it triggers lets tg3_adjust_link() finish the setup.  Without phylib,
 * the flags are applied directly and — if the interface is up — the
 * hardware is halted and restarted (with a PHY reset on 5717/5719/5720
 * to avoid a PHY lock up).
 *
 * Returns 0 on success, -EINVAL for an unsupported pause combination,
 * or the error from tg3_restart_hw().
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;
	bool reset_phy = false;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

		if (!phy_validate_pause(phydev, epause))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			}
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		}

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			if (phydev->autoneg) {
				/* phy_set_asym_pause() will
				 * renegotiate the link to inform our
				 * link partner of our flow control
				 * settings, even if the flow control
				 * is forced.  Let tg3_adjust_link()
				 * do the final flow control setup.
				 */
				return 0;
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			/* Reset PHY to avoid PHY lock up */
			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
			    tg3_asic_rev(tp) == ASIC_REV_5720)
				reset_phy = true;

			err = tg3_restart_hw(tp, reset_phy);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12591
12592 static int tg3_get_sset_count(struct net_device *dev, int sset)
12593 {
12594         switch (sset) {
12595         case ETH_SS_TEST:
12596                 return TG3_NUM_TEST;
12597         case ETH_SS_STATS:
12598                 return TG3_NUM_STATS;
12599         default:
12600                 return -EOPNOTSUPP;
12601         }
12602 }
12603
/* ethtool get_rxnfc: only ETHTOOL_GRXRINGS is supported — report the
 * number of RX queues (the live count when running, otherwise the
 * count that would be used: online CPUs capped at TG3_RSS_MAX_NUM_QS).
 *
 * Returns 0, or -EOPNOTSUPP without MSI-X or for other commands.
 */
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
12628
12629 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12630 {
12631         u32 size = 0;
12632         struct tg3 *tp = netdev_priv(dev);
12633
12634         if (tg3_flag(tp, SUPPORT_MSIX))
12635                 size = TG3_RSS_INDIR_TBL_SIZE;
12636
12637         return size;
12638 }
12639
12640 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12641 {
12642         struct tg3 *tp = netdev_priv(dev);
12643         int i;
12644
12645         if (hfunc)
12646                 *hfunc = ETH_RSS_HASH_TOP;
12647         if (!indir)
12648                 return 0;
12649
12650         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12651                 indir[i] = tp->rss_ind_tbl[i];
12652
12653         return 0;
12654 }
12655
/* ethtool set_rxfh: update the RSS indirection table.
 *
 * A hash key or a hash function other than Toeplitz is rejected.  The
 * new table is stored in tp->rss_ind_tbl and, when the device is
 * running with RSS enabled, written to hardware under the full lock.
 *
 * Returns 0 on success or -EOPNOTSUPP.
 */
static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
			const u8 hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
12687
12688 static void tg3_get_channels(struct net_device *dev,
12689                              struct ethtool_channels *channel)
12690 {
12691         struct tg3 *tp = netdev_priv(dev);
12692         u32 deflt_qs = netif_get_num_default_rss_queues();
12693
12694         channel->max_rx = tp->rxq_max;
12695         channel->max_tx = tp->txq_max;
12696
12697         if (netif_running(dev)) {
12698                 channel->rx_count = tp->rxq_cnt;
12699                 channel->tx_count = tp->txq_cnt;
12700         } else {
12701                 if (tp->rxq_req)
12702                         channel->rx_count = tp->rxq_req;
12703                 else
12704                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12705
12706                 if (tp->txq_req)
12707                         channel->tx_count = tp->txq_req;
12708                 else
12709                         channel->tx_count = min(deflt_qs, tp->txq_max);
12710         }
12711 }
12712
/* ethtool set_channels: set the requested RX/TX queue counts.
 *
 * The request is recorded in tp->rxq_req/txq_req; if the interface is
 * up, the device is fully stopped and restarted so the new queue
 * counts take effect immediately.
 *
 * Returns 0 on success, -EOPNOTSUPP without MSI-X, or -EINVAL for
 * counts above the hardware maximum.
 */
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
12739
12740 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12741 {
12742         switch (stringset) {
12743         case ETH_SS_STATS:
12744                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12745                 break;
12746         case ETH_SS_TEST:
12747                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12748                 break;
12749         default:
12750                 WARN_ON(1);     /* we need a WARN() */
12751                 break;
12752         }
12753 }
12754
12755 static int tg3_set_phys_id(struct net_device *dev,
12756                             enum ethtool_phys_id_state state)
12757 {
12758         struct tg3 *tp = netdev_priv(dev);
12759
12760         if (!netif_running(tp->dev))
12761                 return -EAGAIN;
12762
12763         switch (state) {
12764         case ETHTOOL_ID_ACTIVE:
12765                 return 1;       /* cycle on/off once per second */
12766
12767         case ETHTOOL_ID_ON:
12768                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12769                      LED_CTRL_1000MBPS_ON |
12770                      LED_CTRL_100MBPS_ON |
12771                      LED_CTRL_10MBPS_ON |
12772                      LED_CTRL_TRAFFIC_OVERRIDE |
12773                      LED_CTRL_TRAFFIC_BLINK |
12774                      LED_CTRL_TRAFFIC_LED);
12775                 break;
12776
12777         case ETHTOOL_ID_OFF:
12778                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12779                      LED_CTRL_TRAFFIC_OVERRIDE);
12780                 break;
12781
12782         case ETHTOOL_ID_INACTIVE:
12783                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12784                 break;
12785         }
12786
12787         return 0;
12788 }
12789
12790 static void tg3_get_ethtool_stats(struct net_device *dev,
12791                                    struct ethtool_stats *estats, u64 *tmp_stats)
12792 {
12793         struct tg3 *tp = netdev_priv(dev);
12794
12795         if (tp->hw_stats)
12796                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12797         else
12798                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12799 }
12800
/* Read the Vital Product Data block, either out of NVRAM (when the TG3
 * EEPROM magic is present) or through the PCI VPD capability.
 *
 * Returns a kmalloc()ed buffer the caller must kfree(), with the block
 * length stored in *vpdlen; returns NULL when NVRAM is absent or on any
 * read/allocation failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
        int i;
        __be32 *buf;
        u32 offset = 0, len = 0;
        u32 magic, val;

        if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                /* Scan the NVRAM directory for an extended-VPD entry. */
                for (offset = TG3_NVM_DIR_START;
                     offset < TG3_NVM_DIR_END;
                     offset += TG3_NVM_DIRENT_SIZE) {
                        if (tg3_nvram_read(tp, offset, &val))
                                return NULL;

                        if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
                            TG3_NVM_DIRTYPE_EXTVPD)
                                break;
                }

                if (offset != TG3_NVM_DIR_END) {
                        /* Entry found: length is in 4-byte words; the
                         * data location is read from the next word.
                         */
                        len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
                        if (tg3_nvram_read(tp, offset + 4, &offset))
                                return NULL;

                        offset = tg3_nvram_logical_addr(tp, offset);
                }
        }

        if (!offset || !len) {
                /* No extended VPD entry; use the fixed VPD location. */
                offset = TG3_NVM_VPD_OFF;
                len = TG3_NVM_VPD_LEN;
        }

        buf = kmalloc(len, GFP_KERNEL);
        if (buf == NULL)
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                for (i = 0; i < len; i += 4) {
                        /* The data is in little-endian format in NVRAM.
                         * Use the big-endian read routines to preserve
                         * the byte order as it exists in NVRAM.
                         */
                        if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
                                goto error;
                }
        } else {
                /* No TG3 EEPROM image: read through the PCI VPD
                 * capability instead, retrying at most three times.
                 */
                u8 *ptr;
                ssize_t cnt;
                unsigned int pos = 0;

                ptr = (u8 *)&buf[0];
                for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
                        cnt = pci_read_vpd(tp->pdev, pos,
                                           len - pos, ptr);
                        if (cnt == -ETIMEDOUT || cnt == -EINTR)
                                cnt = 0;        /* transient; retry */
                        else if (cnt < 0)
                                goto error;
                }
                if (pos != len)
                        goto error;
        }

        *vpdlen = len;

        return buf;

error:
        kfree(buf);
        return NULL;
}
12876
12877 #define NVRAM_TEST_SIZE 0x100
12878 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12879 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12880 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12881 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12882 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12883 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12884 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12885 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12886
/* Self-test: verify NVRAM contents.
 *
 * Depending on the NVRAM magic, this validates either a self-boot image
 * (8-bit checksum or per-byte parity for the hardware format) or a full
 * EEPROM image (CRC over the bootstrap and manufacturing blocks, plus
 * the VPD checksum keyword when present).  Returns 0 on success, -EIO
 * on a bad image, -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic, len;
        __be32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_flag(tp, NO_NVRAM))
                return 0;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return -EIO;

        /* Determine how much of the image to read from its format. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        /* Self-boot format 1: size depends on revision. */
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_4:
                                size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_5:
                                size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_6:
                                size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
                                break;
                        default:
                                return -EIO;
                        }
                } else
                        return 0;       /* unknown format: nothing to check */
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Read the image to be validated into buf. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                err = tg3_nvram_read_be32(tp, i, &buf[j]);
                if (err)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = be32_to_cpu(buf[0]);
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                /* A valid image sums to zero. */
                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        } else if (i == 16) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                /* Each data byte plus its stored parity bit must have
                 * odd parity overall; either failure case bails out
                 * with err still set to -EIO.
                 */
                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        /* Full EEPROM image: validate the two CRC-protected blocks. */
        err = -EIO;

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if (csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;

        kfree(buf);

        /* Also validate the VPD checksum keyword, if present. */
        buf = tg3_vpd_readblock(tp, &len);
        if (!buf)
                return -ENOMEM;

        i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
        if (i > 0) {
                j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
                if (j < 0)
                        goto out;

                if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
                        goto out;

                i += PCI_VPD_LRDT_TAG_SIZE;
                j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
                                              PCI_VPD_RO_KEYWORD_CHKSUM);
                if (j > 0) {
                        u8 csum8 = 0;

                        j += PCI_VPD_INFO_FLD_HDR_SIZE;

                        /* Bytes up to and including the checksum byte
                         * must sum to zero.
                         */
                        for (i = 0; i <= j; i++)
                                csum8 += ((u8 *)buf)[i];

                        if (csum8)
                                goto out;
                }
        }

        err = 0;

out:
        kfree(buf);
        return err;
}
13065
13066 #define TG3_SERDES_TIMEOUT_SEC  2
13067 #define TG3_COPPER_TIMEOUT_SEC  6
13068
13069 static int tg3_test_link(struct tg3 *tp)
13070 {
13071         int i, max;
13072
13073         if (!netif_running(tp->dev))
13074                 return -ENODEV;
13075
13076         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13077                 max = TG3_SERDES_TIMEOUT_SEC;
13078         else
13079                 max = TG3_COPPER_TIMEOUT_SEC;
13080
13081         for (i = 0; i < max; i++) {
13082                 if (tp->link_up)
13083                         return 0;
13084
13085                 if (msleep_interruptible(1000))
13086                         break;
13087         }
13088
13089         return -EIO;
13090 }
13091
/* Self-test: exercise the commonly used registers.
 *
 * For each entry in reg_tbl (filtered by the 5705/5750/5788 family
 * flags) the original value is saved, then zeros and then every defined
 * bit are written; after each write the read-only bits must be
 * unchanged and the read/write bits must reflect the write.  The saved
 * value is restored before moving on.  Returns 0 on success, -EIO on
 * the first mismatch (restoring the register before returning).
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* Table terminator. */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
                if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that do not apply to this chip family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                netdev_err(tp->dev,
                           "Register test failed at offset %x\n", offset);
        tw32(offset, save_val);
        return -EIO;
}
13312
13313 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13314 {
13315         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13316         int i;
13317         u32 j;
13318
13319         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13320                 for (j = 0; j < len; j += 4) {
13321                         u32 val;
13322
13323                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13324                         tg3_read_mem(tp, offset + j, &val);
13325                         if (val != test_pattern[i])
13326                                 return -EIO;
13327                 }
13328         }
13329         return 0;
13330 }
13331
/* Self-test: pattern-test the chip's internal memory.
 *
 * Picks the { offset, len } region table matching the chip family (each
 * table is terminated by offset 0xffffffff) and runs tg3_do_mem_test()
 * over every region.  Returns 0 on success or the first failure code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
        /* Per-family tables of internal memory regions to test. */
        static struct mem_entry {
                u32 offset;
                u32 len;
        } mem_tbl_570x[] = {
                { 0x00000000, 0x00b50},
                { 0x00002000, 0x1c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5705[] = {
                { 0x00000100, 0x0000c},
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x01000},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0e000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5755[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x00800},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5906[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00400},
                { 0x00006000, 0x00400},
                { 0x00008000, 0x01000},
                { 0x00010000, 0x01000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5717[] = {
                { 0x00000200, 0x00008},
                { 0x00010000, 0x0a000},
                { 0x00020000, 0x13c00},
                { 0xffffffff, 0x00000}
        }, mem_tbl_57765[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x09800},
                { 0x00010000, 0x0a000},
                { 0xffffffff, 0x00000}
        };
        struct mem_entry *mem_tbl;
        int err = 0;
        int i;

        /* Most specific chip families are matched first. */
        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (tg3_flag(tp, 57765_CLASS) ||
                 tg3_asic_rev(tp) == ASIC_REV_5762)
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
        else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
        else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
        else
                mem_tbl = mem_tbl_570x;

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
                if (err)
                        break;
        }

        return err;
}
13401
13402 #define TG3_TSO_MSS             500
13403
13404 #define TG3_TSO_IP_HDR_LEN      20
13405 #define TG3_TSO_TCP_HDR_LEN     20
13406 #define TG3_TSO_TCP_OPT_LEN     12
13407
/* Canned frame body used by the TSO loopback self-test: a 2-byte
 * Ethertype (0x0800, IPv4) followed by a 20-byte IPv4 header and a
 * 20-byte TCP header with 12 bytes of options -- matching the
 * TG3_TSO_*_LEN constants above.  tg3_run_loopback() copies this in
 * after the MAC addresses and patches the IP total-length field (and,
 * for HW-TSO devices, clears the TCP checksum) before transmitting.
 * Field comments below assume standard IPv4/TCP layout -- verify
 * against tg3_run_loopback() when changing.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,                     /* Ethertype: IPv4 */
0x45, 0x00, 0x00, 0x00,         /* IP: version/IHL, TOS, tot_len (patched) */
0x00, 0x00, 0x40, 0x00,         /* IP: id, flags/frag_off */
0x40, 0x06, 0x00, 0x00,         /* IP: TTL, protocol (TCP), checksum */
0x0a, 0x00, 0x00, 0x01,         /* IP: source address */
0x0a, 0x00, 0x00, 0x02,         /* IP: destination address */
0x0d, 0x00, 0xe0, 0x00,         /* TCP: source/destination ports */
0x00, 0x00, 0x01, 0x00,         /* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,         /* TCP: ack number */
0x80, 0x10, 0x10, 0x00,         /* TCP: data offset/flags, window */
0x14, 0x09, 0x00, 0x00,         /* TCP: checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,         /* TCP options */
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
13424
13425 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13426 {
13427         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13428         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13429         u32 budget;
13430         struct sk_buff *skb;
13431         u8 *tx_data, *rx_data;
13432         dma_addr_t map;
13433         int num_pkts, tx_len, rx_len, i, err;
13434         struct tg3_rx_buffer_desc *desc;
13435         struct tg3_napi *tnapi, *rnapi;
13436         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13437
13438         tnapi = &tp->napi[0];
13439         rnapi = &tp->napi[0];
13440         if (tp->irq_cnt > 1) {
13441                 if (tg3_flag(tp, ENABLE_RSS))
13442                         rnapi = &tp->napi[1];
13443                 if (tg3_flag(tp, ENABLE_TSS))
13444                         tnapi = &tp->napi[1];
13445         }
13446         coal_now = tnapi->coal_now | rnapi->coal_now;
13447
13448         err = -EIO;
13449
13450         tx_len = pktsz;
13451         skb = netdev_alloc_skb(tp->dev, tx_len);
13452         if (!skb)
13453                 return -ENOMEM;
13454
13455         tx_data = skb_put(skb, tx_len);
13456         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13457         memset(tx_data + ETH_ALEN, 0x0, 8);
13458
13459         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13460
13461         if (tso_loopback) {
13462                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13463
13464                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13465                               TG3_TSO_TCP_OPT_LEN;
13466
13467                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13468                        sizeof(tg3_tso_header));
13469                 mss = TG3_TSO_MSS;
13470
13471                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13472                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13473
13474                 /* Set the total length field in the IP header */
13475                 iph->tot_len = htons((u16)(mss + hdr_len));
13476
13477                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13478                               TXD_FLAG_CPU_POST_DMA);
13479
13480                 if (tg3_flag(tp, HW_TSO_1) ||
13481                     tg3_flag(tp, HW_TSO_2) ||
13482                     tg3_flag(tp, HW_TSO_3)) {
13483                         struct tcphdr *th;
13484                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13485                         th = (struct tcphdr *)&tx_data[val];
13486                         th->check = 0;
13487                 } else
13488                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13489
13490                 if (tg3_flag(tp, HW_TSO_3)) {
13491                         mss |= (hdr_len & 0xc) << 12;
13492                         if (hdr_len & 0x10)
13493                                 base_flags |= 0x00000010;
13494                         base_flags |= (hdr_len & 0x3e0) << 5;
13495                 } else if (tg3_flag(tp, HW_TSO_2))
13496                         mss |= hdr_len << 9;
13497                 else if (tg3_flag(tp, HW_TSO_1) ||
13498                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13499                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13500                 } else {
13501                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13502                 }
13503
13504                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13505         } else {
13506                 num_pkts = 1;
13507                 data_off = ETH_HLEN;
13508
13509                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13510                     tx_len > VLAN_ETH_FRAME_LEN)
13511                         base_flags |= TXD_FLAG_JMB_PKT;
13512         }
13513
13514         for (i = data_off; i < tx_len; i++)
13515                 tx_data[i] = (u8) (i & 0xff);
13516
13517         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13518         if (pci_dma_mapping_error(tp->pdev, map)) {
13519                 dev_kfree_skb(skb);
13520                 return -EIO;
13521         }
13522
13523         val = tnapi->tx_prod;
13524         tnapi->tx_buffers[val].skb = skb;
13525         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13526
13527         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13528                rnapi->coal_now);
13529
13530         udelay(10);
13531
13532         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13533
13534         budget = tg3_tx_avail(tnapi);
13535         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13536                             base_flags | TXD_FLAG_END, mss, 0)) {
13537                 tnapi->tx_buffers[val].skb = NULL;
13538                 dev_kfree_skb(skb);
13539                 return -EIO;
13540         }
13541
13542         tnapi->tx_prod++;
13543
13544         /* Sync BD data before updating mailbox */
13545         wmb();
13546
13547         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13548         tr32_mailbox(tnapi->prodmbox);
13549
13550         udelay(10);
13551
13552         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13553         for (i = 0; i < 35; i++) {
13554                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13555                        coal_now);
13556
13557                 udelay(10);
13558
13559                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13560                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13561                 if ((tx_idx == tnapi->tx_prod) &&
13562                     (rx_idx == (rx_start_idx + num_pkts)))
13563                         break;
13564         }
13565
13566         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13567         dev_kfree_skb(skb);
13568
13569         if (tx_idx != tnapi->tx_prod)
13570                 goto out;
13571
13572         if (rx_idx != rx_start_idx + num_pkts)
13573                 goto out;
13574
13575         val = data_off;
13576         while (rx_idx != rx_start_idx) {
13577                 desc = &rnapi->rx_rcb[rx_start_idx++];
13578                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13579                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13580
13581                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13582                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13583                         goto out;
13584
13585                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13586                          - ETH_FCS_LEN;
13587
13588                 if (!tso_loopback) {
13589                         if (rx_len != tx_len)
13590                                 goto out;
13591
13592                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13593                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13594                                         goto out;
13595                         } else {
13596                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13597                                         goto out;
13598                         }
13599                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13600                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13601                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13602                         goto out;
13603                 }
13604
13605                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13606                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13607                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13608                                              mapping);
13609                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13610                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13611                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13612                                              mapping);
13613                 } else
13614                         goto out;
13615
13616                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13617                                             PCI_DMA_FROMDEVICE);
13618
13619                 rx_data += TG3_RX_OFFSET(tp);
13620                 for (i = data_off; i < rx_len; i++, val++) {
13621                         if (*(rx_data + i) != (u8) (val & 0xff))
13622                                 goto out;
13623                 }
13624         }
13625
13626         err = 0;
13627
13628         /* tg3_free_rings will unmap and free the rx_data */
13629 out:
13630         return err;
13631 }
13632
13633 #define TG3_STD_LOOPBACK_FAILED         1
13634 #define TG3_JMB_LOOPBACK_FAILED         2
13635 #define TG3_TSO_LOOPBACK_FAILED         4
13636 #define TG3_LOOPBACK_FAILED \
13637         (TG3_STD_LOOPBACK_FAILED | \
13638          TG3_JMB_LOOPBACK_FAILED | \
13639          TG3_TSO_LOOPBACK_FAILED)
13640
/* Run the MAC-, PHY- and (optionally) external-loopback packet tests.
 *
 * @tp:         device state
 * @data:       ethtool self-test result array; the TG3_*_LOOPB_TEST slots
 *              are OR'ed with TG3_{STD,JMB,TSO}_LOOPBACK_FAILED bits
 * @do_extlpbk: also run the external-cable loopback variants
 *
 * Returns 0 when every executed loopback variant passed, -EIO otherwise
 * (including when the interface is down or the HW reset fails, in which
 * case all result slots are marked failed).
 *
 * EEE capability is masked off for the duration of the test and restored
 * at "done" regardless of outcome.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test frame to what the DMA engine can handle. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Temporarily disable EEE so low-power idling cannot disturb the
	 * loopback traffic; the saved bit is restored below.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback needs a real (non-SERDES, non-phylib) PHY. */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Any failure bit recorded above turns into -EIO. */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13755
/* ethtool .self_test handler.
 *
 * Runs the NVRAM, link, register, memory, loopback and interrupt tests
 * and records per-test pass/fail in @data (1 = failed).  Offline tests
 * (ETH_TEST_FL_OFFLINE) halt the chip, run destructive tests under the
 * full lock, then restart the hardware.  Any failure also sets
 * ETH_TEST_FL_FAILED in @etest->flags.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* A sleeping device must be powered up before it can be tested. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* External loopback deliberately takes the link down, so the
	 * link test is meaningless in that case.
	 */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test needs the lock dropped; it re-arms
		 * and fields a real interrupt.
		 */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		/* Restart the PHY only if we stopped it above and the
		 * hardware restart succeeded.
		 */
		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);

}
13844
13845 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13846 {
13847         struct tg3 *tp = netdev_priv(dev);
13848         struct hwtstamp_config stmpconf;
13849
13850         if (!tg3_flag(tp, PTP_CAPABLE))
13851                 return -EOPNOTSUPP;
13852
13853         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13854                 return -EFAULT;
13855
13856         if (stmpconf.flags)
13857                 return -EINVAL;
13858
13859         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13860             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13861                 return -ERANGE;
13862
13863         switch (stmpconf.rx_filter) {
13864         case HWTSTAMP_FILTER_NONE:
13865                 tp->rxptpctl = 0;
13866                 break;
13867         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13868                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13869                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13870                 break;
13871         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13872                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13873                                TG3_RX_PTP_CTL_SYNC_EVNT;
13874                 break;
13875         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13876                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13877                                TG3_RX_PTP_CTL_DELAY_REQ;
13878                 break;
13879         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13880                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13881                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13882                 break;
13883         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13884                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13885                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13886                 break;
13887         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13888                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13889                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13890                 break;
13891         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13892                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13893                                TG3_RX_PTP_CTL_SYNC_EVNT;
13894                 break;
13895         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13896                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13897                                TG3_RX_PTP_CTL_SYNC_EVNT;
13898                 break;
13899         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13900                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13901                                TG3_RX_PTP_CTL_SYNC_EVNT;
13902                 break;
13903         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13904                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13905                                TG3_RX_PTP_CTL_DELAY_REQ;
13906                 break;
13907         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13908                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13909                                TG3_RX_PTP_CTL_DELAY_REQ;
13910                 break;
13911         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13912                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13913                                TG3_RX_PTP_CTL_DELAY_REQ;
13914                 break;
13915         default:
13916                 return -ERANGE;
13917         }
13918
13919         if (netif_running(dev) && tp->rxptpctl)
13920                 tw32(TG3_RX_PTP_CTL,
13921                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13922
13923         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13924                 tg3_flag_set(tp, TX_TSTAMP_EN);
13925         else
13926                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13927
13928         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13929                 -EFAULT : 0;
13930 }
13931
13932 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13933 {
13934         struct tg3 *tp = netdev_priv(dev);
13935         struct hwtstamp_config stmpconf;
13936
13937         if (!tg3_flag(tp, PTP_CAPABLE))
13938                 return -EOPNOTSUPP;
13939
13940         stmpconf.flags = 0;
13941         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13942                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13943
13944         switch (tp->rxptpctl) {
13945         case 0:
13946                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13947                 break;
13948         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13949                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13950                 break;
13951         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13952                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13953                 break;
13954         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13955                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13956                 break;
13957         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13958                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13959                 break;
13960         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13961                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13962                 break;
13963         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13964                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13965                 break;
13966         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13967                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13968                 break;
13969         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13970                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13971                 break;
13972         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13973                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13974                 break;
13975         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13976                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13977                 break;
13978         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13979                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13980                 break;
13981         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13982                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13983                 break;
13984         default:
13985                 WARN_ON_ONCE(1);
13986                 return -ERANGE;
13987         }
13988
13989         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13990                 -EFAULT : 0;
13991 }
13992
/* ndo_do_ioctl handler.
 *
 * Dispatches MII register access (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG)
 * and hardware timestamping (SIOCSHWTSTAMP/SIOCGHWTSTAMP) ioctls.  When
 * the PHY is managed by phylib the whole request is delegated to
 * phy_mii_ioctl().  Unknown commands return -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fall through */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* tp->lock serializes access to the MDIO interface. */
		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
14057
14058 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14059 {
14060         struct tg3 *tp = netdev_priv(dev);
14061
14062         memcpy(ec, &tp->coal, sizeof(*ec));
14063         return 0;
14064 }
14065
14066 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14067 {
14068         struct tg3 *tp = netdev_priv(dev);
14069         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14070         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14071
14072         if (!tg3_flag(tp, 5705_PLUS)) {
14073                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14074                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14075                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14076                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14077         }
14078
14079         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14080             (!ec->rx_coalesce_usecs) ||
14081             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14082             (!ec->tx_coalesce_usecs) ||
14083             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14084             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14085             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14086             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14087             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14088             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14089             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14090             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14091                 return -EINVAL;
14092
14093         /* Only copy relevant parameters, ignore all others. */
14094         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14095         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14096         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14097         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14098         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14099         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14100         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14101         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14102         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14103
14104         if (netif_running(dev)) {
14105                 tg3_full_lock(tp, 0);
14106                 __tg3_set_coalesce(tp, &tp->coal);
14107                 tg3_full_unlock(tp);
14108         }
14109         return 0;
14110 }
14111
14112 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14113 {
14114         struct tg3 *tp = netdev_priv(dev);
14115
14116         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14117                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14118                 return -EOPNOTSUPP;
14119         }
14120
14121         if (edata->advertised != tp->eee.advertised) {
14122                 netdev_warn(tp->dev,
14123                             "Direct manipulation of EEE advertisement is not supported\n");
14124                 return -EINVAL;
14125         }
14126
14127         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14128                 netdev_warn(tp->dev,
14129                             "Maximal Tx Lpi timer supported is %#x(u)\n",
14130                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14131                 return -EINVAL;
14132         }
14133
14134         tp->eee = *edata;
14135
14136         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14137         tg3_warn_mgmt_link_flap(tp);
14138
14139         if (netif_running(tp->dev)) {
14140                 tg3_full_lock(tp, 0);
14141                 tg3_setup_eee(tp);
14142                 tg3_phy_reset(tp);
14143                 tg3_full_unlock(tp);
14144         }
14145
14146         return 0;
14147 }
14148
14149 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14150 {
14151         struct tg3 *tp = netdev_priv(dev);
14152
14153         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14154                 netdev_warn(tp->dev,
14155                             "Board does not support EEE!\n");
14156                 return -EOPNOTSUPP;
14157         }
14158
14159         *edata = tp->eee;
14160         return 0;
14161 }
14162
/* ethtool operations supported by this driver; registered via
 * dev->ethtool_ops at probe time.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
14199
14200 static void tg3_get_stats64(struct net_device *dev,
14201                             struct rtnl_link_stats64 *stats)
14202 {
14203         struct tg3 *tp = netdev_priv(dev);
14204
14205         spin_lock_bh(&tp->lock);
14206         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14207                 *stats = tp->net_stats_prev;
14208                 spin_unlock_bh(&tp->lock);
14209                 return;
14210         }
14211
14212         tg3_get_nstats(tp, stats);
14213         spin_unlock_bh(&tp->lock);
14214 }
14215
/* ndo_set_rx_mode handler: reprogram the receive filters under the
 * full lock.  A no-op while the interface is down — the filters are
 * programmed from scratch on the next open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
14227
14228 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14229                                int new_mtu)
14230 {
14231         dev->mtu = new_mtu;
14232
14233         if (new_mtu > ETH_DATA_LEN) {
14234                 if (tg3_flag(tp, 5780_CLASS)) {
14235                         netdev_update_features(dev);
14236                         tg3_flag_clear(tp, TSO_CAPABLE);
14237                 } else {
14238                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14239                 }
14240         } else {
14241                 if (tg3_flag(tp, 5780_CLASS)) {
14242                         tg3_flag_set(tp, TSO_CAPABLE);
14243                         netdev_update_features(dev);
14244                 }
14245                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14246         }
14247 }
14248
14249 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14250 {
14251         struct tg3 *tp = netdev_priv(dev);
14252         int err;
14253         bool reset_phy = false;
14254
14255         if (!netif_running(dev)) {
14256                 /* We'll just catch it later when the
14257                  * device is up'd.
14258                  */
14259                 tg3_set_mtu(dev, tp, new_mtu);
14260                 return 0;
14261         }
14262
14263         tg3_phy_stop(tp);
14264
14265         tg3_netif_stop(tp);
14266
14267         tg3_set_mtu(dev, tp, new_mtu);
14268
14269         tg3_full_lock(tp, 1);
14270
14271         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14272
14273         /* Reset PHY, otherwise the read DMA engine will be in a mode that
14274          * breaks all requests to 256 bytes.
14275          */
14276         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14277             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14278             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14279             tg3_asic_rev(tp) == ASIC_REV_5720)
14280                 reset_phy = true;
14281
14282         err = tg3_restart_hw(tp, reset_phy);
14283
14284         if (!err)
14285                 tg3_netif_start(tp);
14286
14287         tg3_full_unlock(tp);
14288
14289         if (!err)
14290                 tg3_phy_start(tp);
14291
14292         return err;
14293 }
14294
/* Netdev entry points implemented by this driver. */
static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_get_stats64        = tg3_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
        .ndo_fix_features       = tg3_fix_features,
        .ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};
14312
14313 static void tg3_get_eeprom_size(struct tg3 *tp)
14314 {
14315         u32 cursize, val, magic;
14316
14317         tp->nvram_size = EEPROM_CHIP_SIZE;
14318
14319         if (tg3_nvram_read(tp, 0, &magic) != 0)
14320                 return;
14321
14322         if ((magic != TG3_EEPROM_MAGIC) &&
14323             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14324             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14325                 return;
14326
14327         /*
14328          * Size the chip by reading offsets at increasing powers of two.
14329          * When we encounter our validation signature, we know the addressing
14330          * has wrapped around, and thus have our chip size.
14331          */
14332         cursize = 0x10;
14333
14334         while (cursize < tp->nvram_size) {
14335                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14336                         return;
14337
14338                 if (val == magic)
14339                         break;
14340
14341                 cursize <<= 1;
14342         }
14343
14344         tp->nvram_size = cursize;
14345 }
14346
14347 static void tg3_get_nvram_size(struct tg3 *tp)
14348 {
14349         u32 val;
14350
14351         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14352                 return;
14353
14354         /* Selfboot format */
14355         if (val != TG3_EEPROM_MAGIC) {
14356                 tg3_get_eeprom_size(tp);
14357                 return;
14358         }
14359
14360         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14361                 if (val != 0) {
14362                         /* This is confusing.  We want to operate on the
14363                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14364                          * call will read from NVRAM and byteswap the data
14365                          * according to the byteswapping settings for all
14366                          * other register accesses.  This ensures the data we
14367                          * want will always reside in the lower 16-bits.
14368                          * However, the data in NVRAM is in LE format, which
14369                          * means the data from the NVRAM read will always be
14370                          * opposite the endianness of the CPU.  The 16-bit
14371                          * byteswap then brings the data to CPU endianness.
14372                          */
14373                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14374                         return;
14375                 }
14376         }
14377         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14378 }
14379
14380 static void tg3_get_nvram_info(struct tg3 *tp)
14381 {
14382         u32 nvcfg1;
14383
14384         nvcfg1 = tr32(NVRAM_CFG1);
14385         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14386                 tg3_flag_set(tp, FLASH);
14387         } else {
14388                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14389                 tw32(NVRAM_CFG1, nvcfg1);
14390         }
14391
14392         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14393             tg3_flag(tp, 5780_CLASS)) {
14394                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14395                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14396                         tp->nvram_jedecnum = JEDEC_ATMEL;
14397                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14398                         tg3_flag_set(tp, NVRAM_BUFFERED);
14399                         break;
14400                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14401                         tp->nvram_jedecnum = JEDEC_ATMEL;
14402                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14403                         break;
14404                 case FLASH_VENDOR_ATMEL_EEPROM:
14405                         tp->nvram_jedecnum = JEDEC_ATMEL;
14406                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14407                         tg3_flag_set(tp, NVRAM_BUFFERED);
14408                         break;
14409                 case FLASH_VENDOR_ST:
14410                         tp->nvram_jedecnum = JEDEC_ST;
14411                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14412                         tg3_flag_set(tp, NVRAM_BUFFERED);
14413                         break;
14414                 case FLASH_VENDOR_SAIFUN:
14415                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14416                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14417                         break;
14418                 case FLASH_VENDOR_SST_SMALL:
14419                 case FLASH_VENDOR_SST_LARGE:
14420                         tp->nvram_jedecnum = JEDEC_SST;
14421                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14422                         break;
14423                 }
14424         } else {
14425                 tp->nvram_jedecnum = JEDEC_ATMEL;
14426                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14427                 tg3_flag_set(tp, NVRAM_BUFFERED);
14428         }
14429 }
14430
14431 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14432 {
14433         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14434         case FLASH_5752PAGE_SIZE_256:
14435                 tp->nvram_pagesize = 256;
14436                 break;
14437         case FLASH_5752PAGE_SIZE_512:
14438                 tp->nvram_pagesize = 512;
14439                 break;
14440         case FLASH_5752PAGE_SIZE_1K:
14441                 tp->nvram_pagesize = 1024;
14442                 break;
14443         case FLASH_5752PAGE_SIZE_2K:
14444                 tp->nvram_pagesize = 2048;
14445                 break;
14446         case FLASH_5752PAGE_SIZE_4K:
14447                 tp->nvram_pagesize = 4096;
14448                 break;
14449         case FLASH_5752PAGE_SIZE_264:
14450                 tp->nvram_pagesize = 264;
14451                 break;
14452         case FLASH_5752PAGE_SIZE_528:
14453                 tp->nvram_pagesize = 528;
14454                 break;
14455         }
14456 }
14457
14458 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14459 {
14460         u32 nvcfg1;
14461
14462         nvcfg1 = tr32(NVRAM_CFG1);
14463
14464         /* NVRAM protection for TPM */
14465         if (nvcfg1 & (1 << 27))
14466                 tg3_flag_set(tp, PROTECTED_NVRAM);
14467
14468         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14469         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14470         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14471                 tp->nvram_jedecnum = JEDEC_ATMEL;
14472                 tg3_flag_set(tp, NVRAM_BUFFERED);
14473                 break;
14474         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14475                 tp->nvram_jedecnum = JEDEC_ATMEL;
14476                 tg3_flag_set(tp, NVRAM_BUFFERED);
14477                 tg3_flag_set(tp, FLASH);
14478                 break;
14479         case FLASH_5752VENDOR_ST_M45PE10:
14480         case FLASH_5752VENDOR_ST_M45PE20:
14481         case FLASH_5752VENDOR_ST_M45PE40:
14482                 tp->nvram_jedecnum = JEDEC_ST;
14483                 tg3_flag_set(tp, NVRAM_BUFFERED);
14484                 tg3_flag_set(tp, FLASH);
14485                 break;
14486         }
14487
14488         if (tg3_flag(tp, FLASH)) {
14489                 tg3_nvram_get_pagesize(tp, nvcfg1);
14490         } else {
14491                 /* For eeprom, set pagesize to maximum eeprom size */
14492                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14493
14494                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14495                 tw32(NVRAM_CFG1, nvcfg1);
14496         }
14497 }
14498
14499 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14500 {
14501         u32 nvcfg1, protect = 0;
14502
14503         nvcfg1 = tr32(NVRAM_CFG1);
14504
14505         /* NVRAM protection for TPM */
14506         if (nvcfg1 & (1 << 27)) {
14507                 tg3_flag_set(tp, PROTECTED_NVRAM);
14508                 protect = 1;
14509         }
14510
14511         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14512         switch (nvcfg1) {
14513         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14514         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14515         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14516         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14517                 tp->nvram_jedecnum = JEDEC_ATMEL;
14518                 tg3_flag_set(tp, NVRAM_BUFFERED);
14519                 tg3_flag_set(tp, FLASH);
14520                 tp->nvram_pagesize = 264;
14521                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14522                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14523                         tp->nvram_size = (protect ? 0x3e200 :
14524                                           TG3_NVRAM_SIZE_512KB);
14525                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14526                         tp->nvram_size = (protect ? 0x1f200 :
14527                                           TG3_NVRAM_SIZE_256KB);
14528                 else
14529                         tp->nvram_size = (protect ? 0x1f200 :
14530                                           TG3_NVRAM_SIZE_128KB);
14531                 break;
14532         case FLASH_5752VENDOR_ST_M45PE10:
14533         case FLASH_5752VENDOR_ST_M45PE20:
14534         case FLASH_5752VENDOR_ST_M45PE40:
14535                 tp->nvram_jedecnum = JEDEC_ST;
14536                 tg3_flag_set(tp, NVRAM_BUFFERED);
14537                 tg3_flag_set(tp, FLASH);
14538                 tp->nvram_pagesize = 256;
14539                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14540                         tp->nvram_size = (protect ?
14541                                           TG3_NVRAM_SIZE_64KB :
14542                                           TG3_NVRAM_SIZE_128KB);
14543                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14544                         tp->nvram_size = (protect ?
14545                                           TG3_NVRAM_SIZE_64KB :
14546                                           TG3_NVRAM_SIZE_256KB);
14547                 else
14548                         tp->nvram_size = (protect ?
14549                                           TG3_NVRAM_SIZE_128KB :
14550                                           TG3_NVRAM_SIZE_512KB);
14551                 break;
14552         }
14553 }
14554
14555 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14556 {
14557         u32 nvcfg1;
14558
14559         nvcfg1 = tr32(NVRAM_CFG1);
14560
14561         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14562         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14563         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14564         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14565         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14566                 tp->nvram_jedecnum = JEDEC_ATMEL;
14567                 tg3_flag_set(tp, NVRAM_BUFFERED);
14568                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14569
14570                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14571                 tw32(NVRAM_CFG1, nvcfg1);
14572                 break;
14573         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14574         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14575         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14576         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14577                 tp->nvram_jedecnum = JEDEC_ATMEL;
14578                 tg3_flag_set(tp, NVRAM_BUFFERED);
14579                 tg3_flag_set(tp, FLASH);
14580                 tp->nvram_pagesize = 264;
14581                 break;
14582         case FLASH_5752VENDOR_ST_M45PE10:
14583         case FLASH_5752VENDOR_ST_M45PE20:
14584         case FLASH_5752VENDOR_ST_M45PE40:
14585                 tp->nvram_jedecnum = JEDEC_ST;
14586                 tg3_flag_set(tp, NVRAM_BUFFERED);
14587                 tg3_flag_set(tp, FLASH);
14588                 tp->nvram_pagesize = 256;
14589                 break;
14590         }
14591 }
14592
/* Decode NVRAM_CFG1 for 5761-family devices and record the JEDEC
 * vendor, page size and total size of the attached part.
 *
 * For TPM-protected parts the usable size is read from the
 * NVRAM_ADDR_LOCKOUT register instead of being derived from the
 * vendor strapping.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5761VENDOR_ATMEL_ADB021D:
        case FLASH_5761VENDOR_ATMEL_ADB041D:
        case FLASH_5761VENDOR_ATMEL_ADB081D:
        case FLASH_5761VENDOR_ATMEL_ADB161D:
        case FLASH_5761VENDOR_ATMEL_MDB021D:
        case FLASH_5761VENDOR_ATMEL_MDB041D:
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_A_M45PE40:
        case FLASH_5761VENDOR_ST_A_M45PE80:
        case FLASH_5761VENDOR_ST_A_M45PE16:
        case FLASH_5761VENDOR_ST_M_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE40:
        case FLASH_5761VENDOR_ST_M_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE16:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }

        if (protect) {
                /* Protected part: the usable size comes from the
                 * address-lockout register, not the strapping.
                 */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                /* Unprotected: total size follows from the strapped
                 * device density.
                 */
                switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                }
        }
}
14667
14668 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14669 {
14670         tp->nvram_jedecnum = JEDEC_ATMEL;
14671         tg3_flag_set(tp, NVRAM_BUFFERED);
14672         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14673 }
14674
/* Decode NVRAM_CFG1 for 57780-family devices.
 *
 * EEPROM strappings return early after disabling compatibility
 * bypass; flash strappings break out to the common page-size lookup
 * at the bottom.  Unknown strappings mark the device as having no
 * NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Size depends on the exact Atmel part strapped. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Size depends on the exact ST part strapped. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14746
14747
/* Decode NVRAM_CFG1 for 5717-family devices.
 *
 * EEPROM strappings return early after disabling compatibility
 * bypass; flash strappings break out to the common page-size lookup
 * at the bottom.  Some strappings deliberately leave tp->nvram_size
 * unset so the size is detected later (see the inline comments).
 * Unknown strappings mark the device as having no NVRAM.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Size depends on the exact Atmel part strapped. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Size depends on the exact ST part strapped. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14825
/* Decode NVRAM_CFG1 for 5720-family (and 5762) devices.
 *
 * On 5762, some pinstraps are first remapped onto their 5720
 * equivalents, Macronix parts are handled entirely up front (size
 * comes from the autosense status register), and after the common
 * decode the NVRAM contents are sanity-checked against the known
 * magic values.  EEPROM strappings return early; flash strappings
 * break out to the common page-size lookup.  Unknown strappings mark
 * the device as having no NVRAM.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, nvmpinstrp, nv_status;

        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
                        tg3_flag_set(tp, NO_NVRAM);
                        return;
                }

                switch (nvmpinstrp) {
                case FLASH_5762_MX25L_100:
                case FLASH_5762_MX25L_200:
                case FLASH_5762_MX25L_400:
                case FLASH_5762_MX25L_800:
                case FLASH_5762_MX25L_160_320:
                        tp->nvram_pagesize = 4096;
                        tp->nvram_jedecnum = JEDEC_MACRONIX;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                        tg3_flag_set(tp, FLASH);
                        /* Total size (in MB, as a power of two) comes
                         * from the autosensed device ID field.
                         */
                        nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
                        tp->nvram_size =
                                (1 << (nv_status >> AUTOSENSE_DEVID &
                                                AUTOSENSE_DEVID_MASK)
                                        << AUTOSENSE_SIZE_IN_MB);
                        return;

                case FLASH_5762_EEPROM_HD:
                        nvmpinstrp = FLASH_5720_EEPROM_HD;
                        break;
                case FLASH_5762_EEPROM_LD:
                        nvmpinstrp = FLASH_5720_EEPROM_LD;
                        break;
                case FLASH_5720VENDOR_M_ST_M45PE20:
                        /* This pinstrap supports multiple sizes, so force it
                         * to read the actual size from location 0xf0.
                         */
                        nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
                        break;
                }
        }

        switch (nvmpinstrp) {
        case FLASH_5720_EEPROM_HD:
        case FLASH_5720_EEPROM_LD:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                if (nvmpinstrp == FLASH_5720_EEPROM_HD)
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                else
                        tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
                return;
        case FLASH_5720VENDOR_M_ATMEL_DB011D:
        case FLASH_5720VENDOR_A_ATMEL_DB011B:
        case FLASH_5720VENDOR_A_ATMEL_DB011D:
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
        case FLASH_5720VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Size depends on the exact Atmel part strapped. */
                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ATMEL_DB021D:
                case FLASH_5720VENDOR_A_ATMEL_DB021B:
                case FLASH_5720VENDOR_A_ATMEL_DB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB041D:
                case FLASH_5720VENDOR_A_ATMEL_DB041B:
                case FLASH_5720VENDOR_A_ATMEL_DB041D:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB081D:
                case FLASH_5720VENDOR_A_ATMEL_DB081D:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        /* On 5762 the size is left unset here. */
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5720VENDOR_M_ST_M25PE10:
        case FLASH_5720VENDOR_M_ST_M45PE10:
        case FLASH_5720VENDOR_A_ST_M25PE10:
        case FLASH_5720VENDOR_A_ST_M45PE10:
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
        case FLASH_5720VENDOR_ST_25USPT:
        case FLASH_5720VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Size depends on the exact ST part strapped. */
                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ST_M25PE20:
                case FLASH_5720VENDOR_M_ST_M45PE20:
                case FLASH_5720VENDOR_A_ST_M25PE20:
                case FLASH_5720VENDOR_A_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE40:
                case FLASH_5720VENDOR_M_ST_M45PE40:
                case FLASH_5720VENDOR_A_ST_M25PE40:
                case FLASH_5720VENDOR_A_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE80:
                case FLASH_5720VENDOR_M_ST_M45PE80:
                case FLASH_5720VENDOR_A_ST_M25PE80:
                case FLASH_5720VENDOR_A_ST_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        /* On 5762 the size is left unset here. */
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                u32 val;

                /* 5762: verify the NVRAM actually contains a valid
                 * image; otherwise treat the device as NVRAM-less.
                 */
                if (tg3_nvram_read(tp, 0, &val))
                        return;

                if (val != TG3_EEPROM_MAGIC &&
                    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
                        tg3_flag_set(tp, NO_NVRAM);
        }
}
14989
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM state machine and program the default serial
	 * clock period, then give the FSM time to settle.
	 */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* NVRAM access is arbitrated with firmware; take the
		 * hardware lock before touching the interface.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		/* Dispatch to the ASIC-specific NVRAM geometry probe. */
		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* If the per-ASIC probe left the size unset, determine
		 * it by probing the part directly.
		 */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701 have no NVRAM interface; use plain EEPROM. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
15064
/* One entry of the PCI-subsystem-ID -> PHY-ID fallback table, used when
 * no EEPROM signature is found.  A phy_id of 0 causes the probe code in
 * tg3_phy_probe() to treat the on-board PHY as a SERDES device.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem vendor/device */
	u32 phy_id;				/* expected TG3_PHY_ID_* value */
};
15069
/* Hard-coded mapping from known board subsystem IDs to their PHY IDs;
 * consulted by tg3_lookup_by_subsys() as a last resort when neither the
 * MII registers nor the EEPROM provide a usable PHY ID.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
15133
15134 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15135 {
15136         int i;
15137
15138         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15139                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15140                      tp->pdev->subsystem_vendor) &&
15141                     (subsys_id_to_phy_id[i].subsys_devid ==
15142                      tp->pdev->subsystem_device))
15143                         return &subsys_id_to_phy_id[i];
15144         }
15145         return NULL;
15146 }
15147
/* Pull static board configuration out of the bootcode's shadow of the
 * EEPROM in NIC SRAM (or, on 5906, the VCPU shadow register): PHY id,
 * LED mode, WOL/ASF/APE enables and assorted PHY workaround flags.
 * Safe defaults (PHY_1 LED mode, write-protected EEPROM, WOL capable)
 * are assumed when no valid signature is found.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 exposes its config via the VCPU shadow register
		 * instead of NIC SRAM.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 is only read for bootcode versions in (0, 0x100)
		 * on chips newer than 5700/5701/5703.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Repack the SRAM encoding into the driver's
			 * internal PHY id layout (same layout as built
			 * from MII_PHYSID1/2 in tg3_phy_probe()).
			 */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED quirks override the NVRAM setting. */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Arima boards 0x205a/0x2063 advertise write
			 * protect but should not be treated as such.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* SERDES boards only keep WOL if the fiber-WOL bit is set. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	/* Propagate the final WOL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
15364
/* Read one 32-bit word from the OTP region through the APE OTP
 * interface.  @offset is a word index (the hardware address used is
 * offset * 8).  Returns 0 with the word in *val on success, the error
 * from tg3_nvram_lock() if the lock cannot be taken, or -EBUSY if the
 * read command does not complete within ~1 ms.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	/* Dummy read-back (result discarded) before starting the poll. */
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	/* Poll for command completion, 100 x 10us. */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Shut the OTP command interface back down. */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
15397
15398 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15399 {
15400         int i;
15401         u32 val;
15402
15403         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15404         tw32(OTP_CTRL, cmd);
15405
15406         /* Wait for up to 1 ms for command to execute. */
15407         for (i = 0; i < 100; i++) {
15408                 val = tr32(OTP_STATUS);
15409                 if (val & OTP_STATUS_CMD_DONE)
15410                         break;
15411                 udelay(10);
15412         }
15413
15414         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15415 }
15416
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First (top-half) word. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second (bottom-half) word. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Low 16 bits of the first word become the high half of the
	 * result; high 16 bits of the second word become the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
15446
15447 static void tg3_phy_init_link_config(struct tg3 *tp)
15448 {
15449         u32 adv = ADVERTISED_Autoneg;
15450
15451         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15452                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15453                         adv |= ADVERTISED_1000baseT_Half;
15454                 adv |= ADVERTISED_1000baseT_Full;
15455         }
15456
15457         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15458                 adv |= ADVERTISED_100baseT_Half |
15459                        ADVERTISED_100baseT_Full |
15460                        ADVERTISED_10baseT_Half |
15461                        ADVERTISED_10baseT_Full |
15462                        ADVERTISED_TP;
15463         else
15464                 adv |= ADVERTISED_FIBRE;
15465
15466         tp->link_config.advertising = adv;
15467         tp->link_config.speed = SPEED_UNKNOWN;
15468         tp->link_config.duplex = DUPLEX_UNKNOWN;
15469         tp->link_config.autoneg = AUTONEG_ENABLE;
15470         tp->link_config.active_speed = SPEED_UNKNOWN;
15471         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15472
15473         tp->old_link = -1;
15474 }
15475
/* Identify the PHY attached to the MAC and set up the default link
 * configuration.  The PHY id is taken, in order of preference, from the
 * MII PHYSID registers, the value already recorded by
 * tg3_get_eeprom_hw_cfg(), or the hard-coded subsystem-id table.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Each PCI function gets its own APE PHY lock. */
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* Without ASF, copper gigabit PHYs do not need the power-down
	 * link-keeping flags.
	 */
	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack PHYSID1/2 into the driver's internal id layout. */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Flag EEE capability and defaults on the chip revs that have it. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* Read BMSR twice; the second read reflects the current
		 * link state (the register latches link transitions).
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* Restart autoneg only if the advertised config does not
		 * already match what we want.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
15630
15631 static void tg3_read_vpd(struct tg3 *tp)
15632 {
15633         u8 *vpd_data;
15634         unsigned int block_end, rosize, len;
15635         u32 vpdlen;
15636         int j, i = 0;
15637
15638         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15639         if (!vpd_data)
15640                 goto out_no_vpd;
15641
15642         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15643         if (i < 0)
15644                 goto out_not_found;
15645
15646         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15647         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15648         i += PCI_VPD_LRDT_TAG_SIZE;
15649
15650         if (block_end > vpdlen)
15651                 goto out_not_found;
15652
15653         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15654                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15655         if (j > 0) {
15656                 len = pci_vpd_info_field_size(&vpd_data[j]);
15657
15658                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15659                 if (j + len > block_end || len != 4 ||
15660                     memcmp(&vpd_data[j], "1028", 4))
15661                         goto partno;
15662
15663                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15664                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15665                 if (j < 0)
15666                         goto partno;
15667
15668                 len = pci_vpd_info_field_size(&vpd_data[j]);
15669
15670                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15671                 if (j + len > block_end)
15672                         goto partno;
15673
15674                 if (len >= sizeof(tp->fw_ver))
15675                         len = sizeof(tp->fw_ver) - 1;
15676                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15677                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15678                          &vpd_data[j]);
15679         }
15680
15681 partno:
15682         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15683                                       PCI_VPD_RO_KEYWORD_PARTNO);
15684         if (i < 0)
15685                 goto out_not_found;
15686
15687         len = pci_vpd_info_field_size(&vpd_data[i]);
15688
15689         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15690         if (len > TG3_BPN_SIZE ||
15691             (len + i) > vpdlen)
15692                 goto out_not_found;
15693
15694         memcpy(tp->board_part_number, &vpd_data[i], len);
15695
15696 out_not_found:
15697         kfree(vpd_data);
15698         if (tp->board_part_number[0])
15699                 return;
15700
15701 out_no_vpd:
15702         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15703                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15704                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15705                         strcpy(tp->board_part_number, "BCM5717");
15706                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15707                         strcpy(tp->board_part_number, "BCM5718");
15708                 else
15709                         goto nomatch;
15710         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15711                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15712                         strcpy(tp->board_part_number, "BCM57780");
15713                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15714                         strcpy(tp->board_part_number, "BCM57760");
15715                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15716                         strcpy(tp->board_part_number, "BCM57790");
15717                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15718                         strcpy(tp->board_part_number, "BCM57788");
15719                 else
15720                         goto nomatch;
15721         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15722                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15723                         strcpy(tp->board_part_number, "BCM57761");
15724                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15725                         strcpy(tp->board_part_number, "BCM57765");
15726                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15727                         strcpy(tp->board_part_number, "BCM57781");
15728                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15729                         strcpy(tp->board_part_number, "BCM57785");
15730                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15731                         strcpy(tp->board_part_number, "BCM57791");
15732                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15733                         strcpy(tp->board_part_number, "BCM57795");
15734                 else
15735                         goto nomatch;
15736         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15737                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15738                         strcpy(tp->board_part_number, "BCM57762");
15739                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15740                         strcpy(tp->board_part_number, "BCM57766");
15741                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15742                         strcpy(tp->board_part_number, "BCM57782");
15743                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15744                         strcpy(tp->board_part_number, "BCM57786");
15745                 else
15746                         goto nomatch;
15747         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15748                 strcpy(tp->board_part_number, "BCM95906");
15749         } else {
15750 nomatch:
15751                 strcpy(tp->board_part_number, "none");
15752         }
15753 }
15754
15755 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15756 {
15757         u32 val;
15758
15759         if (tg3_nvram_read(tp, offset, &val) ||
15760             (val & 0xfc000000) != 0x0c000000 ||
15761             tg3_nvram_read(tp, offset + 4, &val) ||
15762             val != 0)
15763                 return 0;
15764
15765         return 1;
15766 }
15767
/* Extract the bootcode version string from NVRAM and append it to
 * tp->fw_ver.
 *
 * Two layouts exist: "new" images (signature 0x0c0000xx in the first
 * word, zero second word) store a 16-byte ASCII version string whose
 * pointer sits at image offset 8; older images only provide packed
 * major/minor numbers at TG3_NVM_PTREV_BCVER.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc holds the bootcode image offset, word 0x4 its load
	 * address; both are needed to translate the version pointer below.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* New-style image: same signature check as tg3_fw_img_is_valid(). */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever version text (e.g. from VPD) is already
	 * present in fw_ver.
	 */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte ASCII version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the load address @start. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Old-style image: packed major/minor in the bootcode
		 * version directory entry.
		 */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
15819
15820 static void tg3_read_hwsb_ver(struct tg3 *tp)
15821 {
15822         u32 val, major, minor;
15823
15824         /* Use native endian representation */
15825         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15826                 return;
15827
15828         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15829                 TG3_NVM_HWSB_CFG1_MAJSFT;
15830         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15831                 TG3_NVM_HWSB_CFG1_MINSFT;
15832
15833         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15834 }
15835
/* Decode the self-boot firmware version from NVRAM.  @val is the
 * already-read NVRAM word 0 carrying the format/revision bits.
 * Appends "sb" and, when decodable, " vM.mm" plus an optional build
 * letter ('a'..'z') to tp->fw_ver.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	/* Only format 1 carries a decodable edition header. */
	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The edition header word lives at a revision-dependent offset.
	 * Note REVISION_1 has no case here.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave fw_ver as just "sb". */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: two decimal digits of minor, and build must map
	 * to a single letter 'a'..'z' (26 values) below.
	 */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Builds >= 1 append a letter: 1 -> 'a', 2 -> 'b', ... */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15890
/* Locate the ASF management firmware image via the NVRAM directory and
 * append its (up to 16-byte) ASCII version string to tp->fw_ver as
 * ", <version>".
 */
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for the ASF-init entry type. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	/* Loop fell through: no ASF entry present. */
	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed firmware load address; later parts
	 * store it in the word preceding the directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	/* offset+4 holds the image location; offset+8 (of the image) the
	 * version-string pointer.  Validate the image before trusting it.
	 */
	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	/* Translate the version pointer from load-address space back to
	 * an NVRAM offset.
	 */
	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): no bounds check before these two writes; callers
	 * appear to leave headroom in fw_ver and the terminating NUL is
	 * forced in tg3_read_fw_ver() — confirm.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Truncate the final word if it would overrun fw_ver. */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
15942
15943 static void tg3_probe_ncsi(struct tg3 *tp)
15944 {
15945         u32 apedata;
15946
15947         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15948         if (apedata != APE_SEG_SIG_MAGIC)
15949                 return;
15950
15951         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15952         if (!(apedata & APE_FW_STATUS_READY))
15953                 return;
15954
15955         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15956                 tg3_flag_set(tp, APE_HAS_NCSI);
15957 }
15958
15959 static void tg3_read_dash_ver(struct tg3 *tp)
15960 {
15961         int vlen;
15962         u32 apedata;
15963         char *fwtype;
15964
15965         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15966
15967         if (tg3_flag(tp, APE_HAS_NCSI))
15968                 fwtype = "NCSI";
15969         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15970                 fwtype = "SMASH";
15971         else
15972                 fwtype = "DASH";
15973
15974         vlen = strlen(tp->fw_ver);
15975
15976         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15977                  fwtype,
15978                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15979                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15980                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15981                  (apedata & APE_FW_VERSION_BLDMSK));
15982 }
15983
15984 static void tg3_read_otp_ver(struct tg3 *tp)
15985 {
15986         u32 val, val2;
15987
15988         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15989                 return;
15990
15991         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15992             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15993             TG3_OTP_MAGIC0_VALID(val)) {
15994                 u64 val64 = (u64) val << 32 | val2;
15995                 u32 ver = 0;
15996                 int i, vlen;
15997
15998                 for (i = 0; i < 7; i++) {
15999                         if ((val64 & 0xff) == 0)
16000                                 break;
16001                         ver = val64 & 0xff;
16002                         val64 >>= 8;
16003                 }
16004                 vlen = strlen(tp->fw_ver);
16005                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16006         }
16007 }
16008
16009 static void tg3_read_fw_ver(struct tg3 *tp)
16010 {
16011         u32 val;
16012         bool vpd_vers = false;
16013
16014         if (tp->fw_ver[0] != 0)
16015                 vpd_vers = true;
16016
16017         if (tg3_flag(tp, NO_NVRAM)) {
16018                 strcat(tp->fw_ver, "sb");
16019                 tg3_read_otp_ver(tp);
16020                 return;
16021         }
16022
16023         if (tg3_nvram_read(tp, 0, &val))
16024                 return;
16025
16026         if (val == TG3_EEPROM_MAGIC)
16027                 tg3_read_bc_ver(tp);
16028         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16029                 tg3_read_sb_ver(tp, val);
16030         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16031                 tg3_read_hwsb_ver(tp);
16032
16033         if (tg3_flag(tp, ENABLE_ASF)) {
16034                 if (tg3_flag(tp, ENABLE_APE)) {
16035                         tg3_probe_ncsi(tp);
16036                         if (!vpd_vers)
16037                                 tg3_read_dash_ver(tp);
16038                 } else if (!vpd_vers) {
16039                         tg3_read_mgmtfw_ver(tp);
16040                 }
16041         }
16042
16043         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16044 }
16045
16046 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16047 {
16048         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16049                 return TG3_RX_RET_MAX_SIZE_5717;
16050         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16051                 return TG3_RX_RET_MAX_SIZE_5700;
16052         else
16053                 return TG3_RX_RET_MAX_SIZE_5705;
16054 }
16055
/* PCI IDs of host chipsets known to reorder posted writes.
 * NOTE(review): the table's consumer is outside this chunk — presumably
 * matched against system bridges to enable a write-flush workaround;
 * confirm against the rest of the file.
 */
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },	/* terminator */
};
16062
/* Find the other PCI function of a dual-port device by scanning the
 * seven sibling functions of tp->pdev's slot.  Returns tp->pdev itself
 * when no peer exists (single-port configuration).  The returned
 * pointer is deliberately NOT reference-elevated; see comment below.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* devnr = slot base: devfn with the three function bits cleared */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference on NULL/self results (NULL-safe). */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
16090
/* Derive tp->pci_chip_rev_id from the misc-host-control register (or,
 * for newer parts, a product-ID config register) and set the family
 * capability flags (5705_PLUS, 5750_PLUS, 5755_PLUS, 57765_PLUS, ...).
 * Later flags in the cascade below depend on earlier ones, so the
 * ordering of the flag-setting blocks is significant.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Pick the config register that holds the real ASIC rev
		 * for this device ID (two generations of product-ID
		 * registers exist).
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 is treated as a 5720 A0. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	/* 57765_PLUS builds on 57765_CLASS / 5717_PLUS set just above. */
	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	/* 5705_PLUS is the broadest family flag and is derived last. */
	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
16178
16179 static bool tg3_10_100_only_device(struct tg3 *tp,
16180                                    const struct pci_device_id *ent)
16181 {
16182         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16183
16184         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16185              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16186             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16187                 return true;
16188
16189         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16190                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16191                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16192                                 return true;
16193                 } else {
16194                         return true;
16195                 }
16196         }
16197
16198         return false;
16199 }
16200
16201 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16202 {
16203         u32 misc_ctrl_reg;
16204         u32 pci_state_reg, grc_misc_cfg;
16205         u32 val;
16206         u16 pci_cmd;
16207         int err;
16208
16209         /* Force memory write invalidate off.  If we leave it on,
16210          * then on 5700_BX chips we have to enable a workaround.
16211          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16212          * to match the cacheline size.  The Broadcom driver have this
16213          * workaround but turns MWI off all the times so never uses
16214          * it.  This seems to suggest that the workaround is insufficient.
16215          */
16216         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16217         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16218         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16219
16220         /* Important! -- Make sure register accesses are byteswapped
16221          * correctly.  Also, for those chips that require it, make
16222          * sure that indirect register accesses are enabled before
16223          * the first operation.
16224          */
16225         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16226                               &misc_ctrl_reg);
16227         tp->misc_host_ctrl |= (misc_ctrl_reg &
16228                                MISC_HOST_CTRL_CHIPREV);
16229         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16230                                tp->misc_host_ctrl);
16231
16232         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16233
16234         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16235          * we need to disable memory and use config. cycles
16236          * only to access all registers. The 5702/03 chips
16237          * can mistakenly decode the special cycles from the
16238          * ICH chipsets as memory write cycles, causing corruption
16239          * of register and memory space. Only certain ICH bridges
16240          * will drive special cycles with non-zero data during the
16241          * address phase which can fall within the 5703's address
16242          * range. This is not an ICH bug as the PCI spec allows
16243          * non-zero address during special cycles. However, only
16244          * these ICH bridges are known to drive non-zero addresses
16245          * during special cycles.
16246          *
16247          * Since special cycles do not cross PCI bridges, we only
16248          * enable this workaround if the 5703 is on the secondary
16249          * bus of these ICH bridges.
16250          */
16251         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16252             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16253                 static struct tg3_dev_id {
16254                         u32     vendor;
16255                         u32     device;
16256                         u32     rev;
16257                 } ich_chipsets[] = {
16258                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16259                           PCI_ANY_ID },
16260                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16261                           PCI_ANY_ID },
16262                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16263                           0xa },
16264                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16265                           PCI_ANY_ID },
16266                         { },
16267                 };
16268                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16269                 struct pci_dev *bridge = NULL;
16270
16271                 while (pci_id->vendor != 0) {
16272                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16273                                                 bridge);
16274                         if (!bridge) {
16275                                 pci_id++;
16276                                 continue;
16277                         }
16278                         if (pci_id->rev != PCI_ANY_ID) {
16279                                 if (bridge->revision > pci_id->rev)
16280                                         continue;
16281                         }
16282                         if (bridge->subordinate &&
16283                             (bridge->subordinate->number ==
16284                              tp->pdev->bus->number)) {
16285                                 tg3_flag_set(tp, ICH_WORKAROUND);
16286                                 pci_dev_put(bridge);
16287                                 break;
16288                         }
16289                 }
16290         }
16291
16292         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16293                 static struct tg3_dev_id {
16294                         u32     vendor;
16295                         u32     device;
16296                 } bridge_chipsets[] = {
16297                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16298                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16299                         { },
16300                 };
16301                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16302                 struct pci_dev *bridge = NULL;
16303
16304                 while (pci_id->vendor != 0) {
16305                         bridge = pci_get_device(pci_id->vendor,
16306                                                 pci_id->device,
16307                                                 bridge);
16308                         if (!bridge) {
16309                                 pci_id++;
16310                                 continue;
16311                         }
16312                         if (bridge->subordinate &&
16313                             (bridge->subordinate->number <=
16314                              tp->pdev->bus->number) &&
16315                             (bridge->subordinate->busn_res.end >=
16316                              tp->pdev->bus->number)) {
16317                                 tg3_flag_set(tp, 5701_DMA_BUG);
16318                                 pci_dev_put(bridge);
16319                                 break;
16320                         }
16321                 }
16322         }
16323
16324         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16325          * DMA addresses > 40-bit. This bridge may have other additional
16326          * 57xx devices behind it in some 4-port NIC designs for example.
16327          * Any tg3 device found behind the bridge will also need the 40-bit
16328          * DMA workaround.
16329          */
16330         if (tg3_flag(tp, 5780_CLASS)) {
16331                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16332                 tp->msi_cap = tp->pdev->msi_cap;
16333         } else {
16334                 struct pci_dev *bridge = NULL;
16335
16336                 do {
16337                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16338                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16339                                                 bridge);
16340                         if (bridge && bridge->subordinate &&
16341                             (bridge->subordinate->number <=
16342                              tp->pdev->bus->number) &&
16343                             (bridge->subordinate->busn_res.end >=
16344                              tp->pdev->bus->number)) {
16345                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16346                                 pci_dev_put(bridge);
16347                                 break;
16348                         }
16349                 } while (bridge);
16350         }
16351
16352         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16353             tg3_asic_rev(tp) == ASIC_REV_5714)
16354                 tp->pdev_peer = tg3_find_peer(tp);
16355
16356         /* Determine TSO capabilities */
16357         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16358                 ; /* Do nothing. HW bug. */
16359         else if (tg3_flag(tp, 57765_PLUS))
16360                 tg3_flag_set(tp, HW_TSO_3);
16361         else if (tg3_flag(tp, 5755_PLUS) ||
16362                  tg3_asic_rev(tp) == ASIC_REV_5906)
16363                 tg3_flag_set(tp, HW_TSO_2);
16364         else if (tg3_flag(tp, 5750_PLUS)) {
16365                 tg3_flag_set(tp, HW_TSO_1);
16366                 tg3_flag_set(tp, TSO_BUG);
16367                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16368                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16369                         tg3_flag_clear(tp, TSO_BUG);
16370         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16371                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16372                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16373                 tg3_flag_set(tp, FW_TSO);
16374                 tg3_flag_set(tp, TSO_BUG);
16375                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16376                         tp->fw_needed = FIRMWARE_TG3TSO5;
16377                 else
16378                         tp->fw_needed = FIRMWARE_TG3TSO;
16379         }
16380
16381         /* Selectively allow TSO based on operating conditions */
16382         if (tg3_flag(tp, HW_TSO_1) ||
16383             tg3_flag(tp, HW_TSO_2) ||
16384             tg3_flag(tp, HW_TSO_3) ||
16385             tg3_flag(tp, FW_TSO)) {
16386                 /* For firmware TSO, assume ASF is disabled.
16387                  * We'll disable TSO later if we discover ASF
16388                  * is enabled in tg3_get_eeprom_hw_cfg().
16389                  */
16390                 tg3_flag_set(tp, TSO_CAPABLE);
16391         } else {
16392                 tg3_flag_clear(tp, TSO_CAPABLE);
16393                 tg3_flag_clear(tp, TSO_BUG);
16394                 tp->fw_needed = NULL;
16395         }
16396
16397         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16398                 tp->fw_needed = FIRMWARE_TG3;
16399
16400         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16401                 tp->fw_needed = FIRMWARE_TG357766;
16402
16403         tp->irq_max = 1;
16404
16405         if (tg3_flag(tp, 5750_PLUS)) {
16406                 tg3_flag_set(tp, SUPPORT_MSI);
16407                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16408                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16409                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16410                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16411                      tp->pdev_peer == tp->pdev))
16412                         tg3_flag_clear(tp, SUPPORT_MSI);
16413
16414                 if (tg3_flag(tp, 5755_PLUS) ||
16415                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16416                         tg3_flag_set(tp, 1SHOT_MSI);
16417                 }
16418
16419                 if (tg3_flag(tp, 57765_PLUS)) {
16420                         tg3_flag_set(tp, SUPPORT_MSIX);
16421                         tp->irq_max = TG3_IRQ_MAX_VECS;
16422                 }
16423         }
16424
16425         tp->txq_max = 1;
16426         tp->rxq_max = 1;
16427         if (tp->irq_max > 1) {
16428                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16429                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16430
16431                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16432                     tg3_asic_rev(tp) == ASIC_REV_5720)
16433                         tp->txq_max = tp->irq_max - 1;
16434         }
16435
16436         if (tg3_flag(tp, 5755_PLUS) ||
16437             tg3_asic_rev(tp) == ASIC_REV_5906)
16438                 tg3_flag_set(tp, SHORT_DMA_BUG);
16439
16440         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16441                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16442
16443         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16444             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16445             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16446             tg3_asic_rev(tp) == ASIC_REV_5762)
16447                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16448
16449         if (tg3_flag(tp, 57765_PLUS) &&
16450             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16451                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16452
16453         if (!tg3_flag(tp, 5705_PLUS) ||
16454             tg3_flag(tp, 5780_CLASS) ||
16455             tg3_flag(tp, USE_JUMBO_BDFLAG))
16456                 tg3_flag_set(tp, JUMBO_CAPABLE);
16457
16458         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16459                               &pci_state_reg);
16460
16461         if (pci_is_pcie(tp->pdev)) {
16462                 u16 lnkctl;
16463
16464                 tg3_flag_set(tp, PCI_EXPRESS);
16465
16466                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16467                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16468                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16469                                 tg3_flag_clear(tp, HW_TSO_2);
16470                                 tg3_flag_clear(tp, TSO_CAPABLE);
16471                         }
16472                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16473                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16474                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16475                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16476                                 tg3_flag_set(tp, CLKREQ_BUG);
16477                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16478                         tg3_flag_set(tp, L1PLLPD_EN);
16479                 }
16480         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16481                 /* BCM5785 devices are effectively PCIe devices, and should
16482                  * follow PCIe codepaths, but do not have a PCIe capabilities
16483                  * section.
16484                  */
16485                 tg3_flag_set(tp, PCI_EXPRESS);
16486         } else if (!tg3_flag(tp, 5705_PLUS) ||
16487                    tg3_flag(tp, 5780_CLASS)) {
16488                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16489                 if (!tp->pcix_cap) {
16490                         dev_err(&tp->pdev->dev,
16491                                 "Cannot find PCI-X capability, aborting\n");
16492                         return -EIO;
16493                 }
16494
16495                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16496                         tg3_flag_set(tp, PCIX_MODE);
16497         }
16498
16499         /* If we have an AMD 762 or VIA K8T800 chipset, write
16500          * reordering to the mailbox registers done by the host
16501          * controller can cause major troubles.  We read back from
16502          * every mailbox register write to force the writes to be
16503          * posted to the chip in order.
16504          */
16505         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16506             !tg3_flag(tp, PCI_EXPRESS))
16507                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16508
16509         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16510                              &tp->pci_cacheline_sz);
16511         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16512                              &tp->pci_lat_timer);
16513         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16514             tp->pci_lat_timer < 64) {
16515                 tp->pci_lat_timer = 64;
16516                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16517                                       tp->pci_lat_timer);
16518         }
16519
16520         /* Important! -- It is critical that the PCI-X hw workaround
16521          * situation is decided before the first MMIO register access.
16522          */
16523         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16524                 /* 5700 BX chips need to have their TX producer index
16525                  * mailboxes written twice to workaround a bug.
16526                  */
16527                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16528
16529                 /* If we are in PCI-X mode, enable register write workaround.
16530                  *
16531                  * The workaround is to use indirect register accesses
16532                  * for all chip writes not to mailbox registers.
16533                  */
16534                 if (tg3_flag(tp, PCIX_MODE)) {
16535                         u32 pm_reg;
16536
16537                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16538
16539                         /* The chip can have it's power management PCI config
16540                          * space registers clobbered due to this bug.
16541                          * So explicitly force the chip into D0 here.
16542                          */
16543                         pci_read_config_dword(tp->pdev,
16544                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16545                                               &pm_reg);
16546                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16547                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16548                         pci_write_config_dword(tp->pdev,
16549                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16550                                                pm_reg);
16551
16552                         /* Also, force SERR#/PERR# in PCI command. */
16553                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16554                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16555                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16556                 }
16557         }
16558
16559         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16560                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16561         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16562                 tg3_flag_set(tp, PCI_32BIT);
16563
16564         /* Chip-specific fixup from Broadcom driver */
16565         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16566             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16567                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16568                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16569         }
16570
16571         /* Default fast path register access methods */
16572         tp->read32 = tg3_read32;
16573         tp->write32 = tg3_write32;
16574         tp->read32_mbox = tg3_read32;
16575         tp->write32_mbox = tg3_write32;
16576         tp->write32_tx_mbox = tg3_write32;
16577         tp->write32_rx_mbox = tg3_write32;
16578
16579         /* Various workaround register access methods */
16580         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16581                 tp->write32 = tg3_write_indirect_reg32;
16582         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16583                  (tg3_flag(tp, PCI_EXPRESS) &&
16584                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16585                 /*
16586                  * Back to back register writes can cause problems on these
16587                  * chips, the workaround is to read back all reg writes
16588                  * except those to mailbox regs.
16589                  *
16590                  * See tg3_write_indirect_reg32().
16591                  */
16592                 tp->write32 = tg3_write_flush_reg32;
16593         }
16594
16595         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16596                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16597                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16598                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16599         }
16600
16601         if (tg3_flag(tp, ICH_WORKAROUND)) {
16602                 tp->read32 = tg3_read_indirect_reg32;
16603                 tp->write32 = tg3_write_indirect_reg32;
16604                 tp->read32_mbox = tg3_read_indirect_mbox;
16605                 tp->write32_mbox = tg3_write_indirect_mbox;
16606                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16607                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16608
16609                 iounmap(tp->regs);
16610                 tp->regs = NULL;
16611
16612                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16613                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16614                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16615         }
16616         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16617                 tp->read32_mbox = tg3_read32_mbox_5906;
16618                 tp->write32_mbox = tg3_write32_mbox_5906;
16619                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16620                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16621         }
16622
16623         if (tp->write32 == tg3_write_indirect_reg32 ||
16624             (tg3_flag(tp, PCIX_MODE) &&
16625              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16626               tg3_asic_rev(tp) == ASIC_REV_5701)))
16627                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16628
16629         /* The memory arbiter has to be enabled in order for SRAM accesses
16630          * to succeed.  Normally on powerup the tg3 chip firmware will make
16631          * sure it is enabled, but other entities such as system netboot
16632          * code might disable it.
16633          */
16634         val = tr32(MEMARB_MODE);
16635         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16636
16637         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16638         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16639             tg3_flag(tp, 5780_CLASS)) {
16640                 if (tg3_flag(tp, PCIX_MODE)) {
16641                         pci_read_config_dword(tp->pdev,
16642                                               tp->pcix_cap + PCI_X_STATUS,
16643                                               &val);
16644                         tp->pci_fn = val & 0x7;
16645                 }
16646         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16647                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16648                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16649                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16650                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16651                         val = tr32(TG3_CPMU_STATUS);
16652
16653                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16654                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16655                 else
16656                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16657                                      TG3_CPMU_STATUS_FSHFT_5719;
16658         }
16659
16660         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16661                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16662                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16663         }
16664
16665         /* Get eeprom hw config before calling tg3_set_power_state().
16666          * In particular, the TG3_FLAG_IS_NIC flag must be
16667          * determined before calling tg3_set_power_state() so that
16668          * we know whether or not to switch out of Vaux power.
16669          * When the flag is set, it means that GPIO1 is used for eeprom
16670          * write protect and also implies that it is a LOM where GPIOs
16671          * are not used to switch power.
16672          */
16673         tg3_get_eeprom_hw_cfg(tp);
16674
16675         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16676                 tg3_flag_clear(tp, TSO_CAPABLE);
16677                 tg3_flag_clear(tp, TSO_BUG);
16678                 tp->fw_needed = NULL;
16679         }
16680
16681         if (tg3_flag(tp, ENABLE_APE)) {
16682                 /* Allow reads and writes to the
16683                  * APE register and memory space.
16684                  */
16685                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16686                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16687                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16688                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16689                                        pci_state_reg);
16690
16691                 tg3_ape_lock_init(tp);
16692                 tp->ape_hb_interval =
16693                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16694         }
16695
16696         /* Set up tp->grc_local_ctrl before calling
16697          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16698          * will bring 5700's external PHY out of reset.
16699          * It is also used as eeprom write protect on LOMs.
16700          */
16701         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16702         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16703             tg3_flag(tp, EEPROM_WRITE_PROT))
16704                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16705                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16706         /* Unused GPIO3 must be driven as output on 5752 because there
16707          * are no pull-up resistors on unused GPIO pins.
16708          */
16709         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16710                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16711
16712         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16713             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16714             tg3_flag(tp, 57765_CLASS))
16715                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16716
16717         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16718             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16719                 /* Turn off the debug UART. */
16720                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16721                 if (tg3_flag(tp, IS_NIC))
16722                         /* Keep VMain power. */
16723                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16724                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16725         }
16726
16727         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16728                 tp->grc_local_ctrl |=
16729                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16730
16731         /* Switch out of Vaux if it is a NIC */
16732         tg3_pwrsrc_switch_to_vmain(tp);
16733
16734         /* Derive initial jumbo mode from MTU assigned in
16735          * ether_setup() via the alloc_etherdev() call
16736          */
16737         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16738                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16739
16740         /* Determine WakeOnLan speed to use. */
16741         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16742             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16743             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16744             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16745                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16746         } else {
16747                 tg3_flag_set(tp, WOL_SPEED_100MB);
16748         }
16749
16750         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16751                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16752
16753         /* A few boards don't want Ethernet@WireSpeed phy feature */
16754         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16755             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16756              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16757              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16758             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16759             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16760                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16761
16762         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16763             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16764                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16765         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16766                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16767
16768         if (tg3_flag(tp, 5705_PLUS) &&
16769             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16770             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16771             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16772             !tg3_flag(tp, 57765_PLUS)) {
16773                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16774                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16775                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16776                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16777                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16778                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16779                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16780                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16781                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16782                 } else
16783                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16784         }
16785
16786         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16787             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16788                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16789                 if (tp->phy_otp == 0)
16790                         tp->phy_otp = TG3_OTP_DEFAULT;
16791         }
16792
16793         if (tg3_flag(tp, CPMU_PRESENT))
16794                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16795         else
16796                 tp->mi_mode = MAC_MI_MODE_BASE;
16797
16798         tp->coalesce_mode = 0;
16799         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16800             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16801                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16802
16803         /* Set these bits to enable statistics workaround. */
16804         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16805             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16806             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16807             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16808                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16809                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16810         }
16811
16812         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16813             tg3_asic_rev(tp) == ASIC_REV_57780)
16814                 tg3_flag_set(tp, USE_PHYLIB);
16815
16816         err = tg3_mdio_init(tp);
16817         if (err)
16818                 return err;
16819
16820         /* Initialize data/descriptor byte/word swapping. */
16821         val = tr32(GRC_MODE);
16822         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16823             tg3_asic_rev(tp) == ASIC_REV_5762)
16824                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16825                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16826                         GRC_MODE_B2HRX_ENABLE |
16827                         GRC_MODE_HTX2B_ENABLE |
16828                         GRC_MODE_HOST_STACKUP);
16829         else
16830                 val &= GRC_MODE_HOST_STACKUP;
16831
16832         tw32(GRC_MODE, val | tp->grc_mode);
16833
16834         tg3_switch_clocks(tp);
16835
16836         /* Clear this out for sanity. */
16837         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16838
16839         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16840         tw32(TG3PCI_REG_BASE_ADDR, 0);
16841
16842         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16843                               &pci_state_reg);
16844         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16845             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16846                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16847                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16848                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16849                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16850                         void __iomem *sram_base;
16851
16852                         /* Write some dummy words into the SRAM status block
16853                          * area, see if it reads back correctly.  If the return
16854                          * value is bad, force enable the PCIX workaround.
16855                          */
16856                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16857
16858                         writel(0x00000000, sram_base);
16859                         writel(0x00000000, sram_base + 4);
16860                         writel(0xffffffff, sram_base + 4);
16861                         if (readl(sram_base) != 0x00000000)
16862                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16863                 }
16864         }
16865
16866         udelay(50);
16867         tg3_nvram_init(tp);
16868
16869         /* If the device has an NVRAM, no need to load patch firmware */
16870         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16871             !tg3_flag(tp, NO_NVRAM))
16872                 tp->fw_needed = NULL;
16873
16874         grc_misc_cfg = tr32(GRC_MISC_CFG);
16875         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16876
16877         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16878             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16879              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16880                 tg3_flag_set(tp, IS_5788);
16881
16882         if (!tg3_flag(tp, IS_5788) &&
16883             tg3_asic_rev(tp) != ASIC_REV_5700)
16884                 tg3_flag_set(tp, TAGGED_STATUS);
16885         if (tg3_flag(tp, TAGGED_STATUS)) {
16886                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16887                                       HOSTCC_MODE_CLRTICK_TXBD);
16888
16889                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16890                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16891                                        tp->misc_host_ctrl);
16892         }
16893
16894         /* Preserve the APE MAC_MODE bits */
16895         if (tg3_flag(tp, ENABLE_APE))
16896                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16897         else
16898                 tp->mac_mode = 0;
16899
16900         if (tg3_10_100_only_device(tp, ent))
16901                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16902
16903         err = tg3_phy_probe(tp);
16904         if (err) {
16905                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16906                 /* ... but do not return immediately ... */
16907                 tg3_mdio_fini(tp);
16908         }
16909
16910         tg3_read_vpd(tp);
16911         tg3_read_fw_ver(tp);
16912
16913         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16914                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16915         } else {
16916                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16917                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16918                 else
16919                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16920         }
16921
16922         /* 5700 {AX,BX} chips have a broken status block link
16923          * change bit implementation, so we must use the
16924          * status register in those cases.
16925          */
16926         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16927                 tg3_flag_set(tp, USE_LINKCHG_REG);
16928         else
16929                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16930
16931         /* The led_ctrl is set during tg3_phy_probe, here we might
16932          * have to force the link status polling mechanism based
16933          * upon subsystem IDs.
16934          */
16935         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16936             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16937             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16938                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16939                 tg3_flag_set(tp, USE_LINKCHG_REG);
16940         }
16941
16942         /* For all SERDES we poll the MAC status register. */
16943         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16944                 tg3_flag_set(tp, POLL_SERDES);
16945         else
16946                 tg3_flag_clear(tp, POLL_SERDES);
16947
16948         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16949                 tg3_flag_set(tp, POLL_CPMU_LINK);
16950
16951         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16952         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16953         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16954             tg3_flag(tp, PCIX_MODE)) {
16955                 tp->rx_offset = NET_SKB_PAD;
16956 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16957                 tp->rx_copy_thresh = ~(u16)0;
16958 #endif
16959         }
16960
16961         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16962         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16963         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16964
16965         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16966
16967         /* Increment the rx prod index on the rx std ring by at most
16968          * 8 for these chips to workaround hw errata.
16969          */
16970         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16971             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16972             tg3_asic_rev(tp) == ASIC_REV_5755)
16973                 tp->rx_std_max_post = 8;
16974
16975         if (tg3_flag(tp, ASPM_WORKAROUND))
16976                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16977                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16978
16979         return err;
16980 }
16981
/* tg3_get_device_address - discover the NIC's permanent MAC address.
 *
 * Tries several sources in priority order and copies the first valid
 * address found into dev->dev_addr:
 *   1. a platform-provided address (eth_platform_get_mac_address),
 *   2. the SSB core helper, for SSB-attached devices,
 *   3. the MAC address mailbox in NIC SRAM (written by bootcode),
 *   4. NVRAM, at a chip- and PCI-function-specific offset,
 *   5. the live MAC_ADDR_0_HIGH/LOW MAC control registers.
 *
 * Returns 0 on success (dev->dev_addr populated), -EINVAL if no valid
 * unicast, non-zero address could be obtained from any source.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;
        int err;

        /* Source 1: address supplied by platform firmware (DT/ACPI). */
        if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
                return 0;

        /* Source 2: SSB GigE core (e.g. tg3 embedded in an SSB bus). */
        if (tg3_flag(tp, IS_SSB_CORE)) {
                err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
                if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
                        return 0;
        }

        /* Select the NVRAM offset of the MAC address for this chip and
         * PCI function.  0x7c is the default; dual-MAC 5704/5780-class
         * parts and multi-function 5717+ parts keep the second MAC at
         * 0xcc (plus 0x18c per additional function on 5717+), and the
         * 5906 keeps it at 0x10.
         */
        mac_offset = 0x7c;
        if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* Cycle the NVRAM lock; if we cannot obtain it, force a
                 * NVRAM command reset instead of leaving it wedged.
                 */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        } else if (tg3_flag(tp, 5717_PLUS)) {
                if (tp->pci_fn & 1)
                        mac_offset = 0xcc;
                if (tp->pci_fn > 1)
                        mac_offset += 0x18c;
        } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* Source 3: MAC address mailbox in SRAM.  Bootcode stores the
         * signature 0x484b (ASCII "HK") in the top 16 bits of the HIGH
         * word when a valid address has been written.
         */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        if ((hi >> 16) == 0x484b) {
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Source 4: NVRAM.  The address is stored big-endian;
                 * the two useful bytes of 'hi' are its upper half.
                 */
                if (!tg3_flag(tp, NO_NVRAM) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
                        memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
                        memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
                }
                /* Source 5: just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        /* Reject multicast and all-zero addresses from any source. */
        if (!is_valid_ether_addr(&dev->dev_addr[0]))
                return -EINVAL;
        return 0;
}
17056
17057 #define BOUNDARY_SINGLE_CACHELINE       1
17058 #define BOUNDARY_MULTI_CACHELINE        2
17059
17060 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17061 {
17062         int cacheline_size;
17063         u8 byte;
17064         int goal;
17065
17066         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17067         if (byte == 0)
17068                 cacheline_size = 1024;
17069         else
17070                 cacheline_size = (int) byte * 4;
17071
17072         /* On 5703 and later chips, the boundary bits have no
17073          * effect.
17074          */
17075         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17076             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17077             !tg3_flag(tp, PCI_EXPRESS))
17078                 goto out;
17079
17080 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17081         goal = BOUNDARY_MULTI_CACHELINE;
17082 #else
17083 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17084         goal = BOUNDARY_SINGLE_CACHELINE;
17085 #else
17086         goal = 0;
17087 #endif
17088 #endif
17089
17090         if (tg3_flag(tp, 57765_PLUS)) {
17091                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17092                 goto out;
17093         }
17094
17095         if (!goal)
17096                 goto out;
17097
17098         /* PCI controllers on most RISC systems tend to disconnect
17099          * when a device tries to burst across a cache-line boundary.
17100          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17101          *
17102          * Unfortunately, for PCI-E there are only limited
17103          * write-side controls for this, and thus for reads
17104          * we will still get the disconnects.  We'll also waste
17105          * these PCI cycles for both read and write for chips
17106          * other than 5700 and 5701 which do not implement the
17107          * boundary bits.
17108          */
17109         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17110                 switch (cacheline_size) {
17111                 case 16:
17112                 case 32:
17113                 case 64:
17114                 case 128:
17115                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17116                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17117                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17118                         } else {
17119                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17120                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17121                         }
17122                         break;
17123
17124                 case 256:
17125                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17126                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17127                         break;
17128
17129                 default:
17130                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17131                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17132                         break;
17133                 }
17134         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17135                 switch (cacheline_size) {
17136                 case 16:
17137                 case 32:
17138                 case 64:
17139                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17140                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17141                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17142                                 break;
17143                         }
17144                         /* fallthrough */
17145                 case 128:
17146                 default:
17147                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17148                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17149                         break;
17150                 }
17151         } else {
17152                 switch (cacheline_size) {
17153                 case 16:
17154                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17155                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17156                                         DMA_RWCTRL_WRITE_BNDRY_16);
17157                                 break;
17158                         }
17159                         /* fallthrough */
17160                 case 32:
17161                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17162                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17163                                         DMA_RWCTRL_WRITE_BNDRY_32);
17164                                 break;
17165                         }
17166                         /* fallthrough */
17167                 case 64:
17168                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17169                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17170                                         DMA_RWCTRL_WRITE_BNDRY_64);
17171                                 break;
17172                         }
17173                         /* fallthrough */
17174                 case 128:
17175                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17176                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17177                                         DMA_RWCTRL_WRITE_BNDRY_128);
17178                                 break;
17179                         }
17180                         /* fallthrough */
17181                 case 256:
17182                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17183                                 DMA_RWCTRL_WRITE_BNDRY_256);
17184                         break;
17185                 case 512:
17186                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17187                                 DMA_RWCTRL_WRITE_BNDRY_512);
17188                         break;
17189                 case 1024:
17190                 default:
17191                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17192                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17193                         break;
17194                 }
17195         }
17196
17197 out:
17198         return val;
17199 }
17200
/* Run one host<->NIC DMA transaction over @size bytes at @buf/@buf_dma
 * using a hand-built internal buffer descriptor placed in NIC SRAM.
 *
 * @to_device: true = read-DMA engine (host memory -> chip),
 *             false = write-DMA engine (chip -> host memory).
 *
 * Returns 0 when the completion FIFO reports the descriptor within the
 * polling window, -ENODEV otherwise.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and DMA status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Point the descriptor at the caller's coherent test buffer. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one u32 at a time through
	 * the indirect PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor's address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll for completion: up to 40 iterations of 100us each. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
17281
#define TEST_BUFFER_SIZE        0x2000

/* Host bridges known to expose the 5700/5701 write-DMA bug even though
 * the loopback DMA test passes; tg3_test_dma() forces the 16-byte write
 * boundary workaround when one of these is present.
 */
static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
        { },
};
17288
/* Build tp->dma_rwctrl (the TG3PCI_DMA_RW_CTRL image: PCI command
 * codes, watermarks, boundaries and per-chip workaround bits) from the
 * bus type and ASIC revision, program it, and — on 5700/5701 only —
 * run a write-then-read loopback DMA over a coherent test buffer to
 * detect the write-DMA corruption bug, tightening the write boundary
 * to 16 bytes when corruption is seen.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, -ENODEV if the DMA test fails outright.
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes for the DMA engines. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	/* Bus-specific watermark setup. */
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	/* 5703/5704: clear the low nibble (boundary bits reassigned). */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Only 5700/5701 need the loopback DMA test below. */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop at most twice: once unrestricted and, if corruption is
	 * detected, once more with the 16-byte boundary workaround.
	 */
	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known index pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: retry with the 16-byte
				 * write boundary workaround engaged.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
17462
17463 static void tg3_init_bufmgr_config(struct tg3 *tp)
17464 {
17465         if (tg3_flag(tp, 57765_PLUS)) {
17466                 tp->bufmgr_config.mbuf_read_dma_low_water =
17467                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17468                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17469                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17470                 tp->bufmgr_config.mbuf_high_water =
17471                         DEFAULT_MB_HIGH_WATER_57765;
17472
17473                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17474                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17475                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17476                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17477                 tp->bufmgr_config.mbuf_high_water_jumbo =
17478                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17479         } else if (tg3_flag(tp, 5705_PLUS)) {
17480                 tp->bufmgr_config.mbuf_read_dma_low_water =
17481                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17482                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17483                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17484                 tp->bufmgr_config.mbuf_high_water =
17485                         DEFAULT_MB_HIGH_WATER_5705;
17486                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17487                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17488                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17489                         tp->bufmgr_config.mbuf_high_water =
17490                                 DEFAULT_MB_HIGH_WATER_5906;
17491                 }
17492
17493                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17494                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17495                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17496                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17497                 tp->bufmgr_config.mbuf_high_water_jumbo =
17498                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17499         } else {
17500                 tp->bufmgr_config.mbuf_read_dma_low_water =
17501                         DEFAULT_MB_RDMA_LOW_WATER;
17502                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17503                         DEFAULT_MB_MACRX_LOW_WATER;
17504                 tp->bufmgr_config.mbuf_high_water =
17505                         DEFAULT_MB_HIGH_WATER;
17506
17507                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17508                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17509                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17510                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17511                 tp->bufmgr_config.mbuf_high_water_jumbo =
17512                         DEFAULT_MB_HIGH_WATER_JUMBO;
17513         }
17514
17515         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17516         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17517 }
17518
17519 static char *tg3_phy_string(struct tg3 *tp)
17520 {
17521         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17522         case TG3_PHY_ID_BCM5400:        return "5400";
17523         case TG3_PHY_ID_BCM5401:        return "5401";
17524         case TG3_PHY_ID_BCM5411:        return "5411";
17525         case TG3_PHY_ID_BCM5701:        return "5701";
17526         case TG3_PHY_ID_BCM5703:        return "5703";
17527         case TG3_PHY_ID_BCM5704:        return "5704";
17528         case TG3_PHY_ID_BCM5705:        return "5705";
17529         case TG3_PHY_ID_BCM5750:        return "5750";
17530         case TG3_PHY_ID_BCM5752:        return "5752";
17531         case TG3_PHY_ID_BCM5714:        return "5714";
17532         case TG3_PHY_ID_BCM5780:        return "5780";
17533         case TG3_PHY_ID_BCM5755:        return "5755";
17534         case TG3_PHY_ID_BCM5787:        return "5787";
17535         case TG3_PHY_ID_BCM5784:        return "5784";
17536         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17537         case TG3_PHY_ID_BCM5906:        return "5906";
17538         case TG3_PHY_ID_BCM5761:        return "5761";
17539         case TG3_PHY_ID_BCM5718C:       return "5718C";
17540         case TG3_PHY_ID_BCM5718S:       return "5718S";
17541         case TG3_PHY_ID_BCM57765:       return "57765";
17542         case TG3_PHY_ID_BCM5719C:       return "5719C";
17543         case TG3_PHY_ID_BCM5720C:       return "5720C";
17544         case TG3_PHY_ID_BCM5762:        return "5762C";
17545         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17546         case 0:                 return "serdes";
17547         default:                return "unknown";
17548         }
17549 }
17550
17551 static char *tg3_bus_string(struct tg3 *tp, char *str)
17552 {
17553         if (tg3_flag(tp, PCI_EXPRESS)) {
17554                 strcpy(str, "PCI Express");
17555                 return str;
17556         } else if (tg3_flag(tp, PCIX_MODE)) {
17557                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17558
17559                 strcpy(str, "PCIX:");
17560
17561                 if ((clock_ctrl == 7) ||
17562                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17563                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17564                         strcat(str, "133MHz");
17565                 else if (clock_ctrl == 0)
17566                         strcat(str, "33MHz");
17567                 else if (clock_ctrl == 2)
17568                         strcat(str, "50MHz");
17569                 else if (clock_ctrl == 4)
17570                         strcat(str, "66MHz");
17571                 else if (clock_ctrl == 6)
17572                         strcat(str, "100MHz");
17573         } else {
17574                 strcpy(str, "PCI:");
17575                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17576                         strcat(str, "66MHz");
17577                 else
17578                         strcat(str, "33MHz");
17579         }
17580         if (tg3_flag(tp, PCI_32BIT))
17581                 strcat(str, ":32-bit");
17582         else
17583                 strcat(str, ":64-bit");
17584         return str;
17585 }
17586
17587 static void tg3_init_coal(struct tg3 *tp)
17588 {
17589         struct ethtool_coalesce *ec = &tp->coal;
17590
17591         memset(ec, 0, sizeof(*ec));
17592         ec->cmd = ETHTOOL_GCOALESCE;
17593         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17594         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17595         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17596         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17597         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17598         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17599         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17600         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17601         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17602
17603         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17604                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17605                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17606                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17607                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17608                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17609         }
17610
17611         if (tg3_flag(tp, 5705_PLUS)) {
17612                 ec->rx_coalesce_usecs_irq = 0;
17613                 ec->tx_coalesce_usecs_irq = 0;
17614                 ec->stats_block_coalesce_usecs = 0;
17615         }
17616 }
17617
17618 static int tg3_init_one(struct pci_dev *pdev,
17619                                   const struct pci_device_id *ent)
17620 {
17621         struct net_device *dev;
17622         struct tg3 *tp;
17623         int i, err;
17624         u32 sndmbx, rcvmbx, intmbx;
17625         char str[40];
17626         u64 dma_mask, persist_dma_mask;
17627         netdev_features_t features = 0;
17628
17629         printk_once(KERN_INFO "%s\n", version);
17630
17631         err = pci_enable_device(pdev);
17632         if (err) {
17633                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17634                 return err;
17635         }
17636
17637         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17638         if (err) {
17639                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17640                 goto err_out_disable_pdev;
17641         }
17642
17643         pci_set_master(pdev);
17644
17645         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17646         if (!dev) {
17647                 err = -ENOMEM;
17648                 goto err_out_free_res;
17649         }
17650
17651         SET_NETDEV_DEV(dev, &pdev->dev);
17652
17653         tp = netdev_priv(dev);
17654         tp->pdev = pdev;
17655         tp->dev = dev;
17656         tp->rx_mode = TG3_DEF_RX_MODE;
17657         tp->tx_mode = TG3_DEF_TX_MODE;
17658         tp->irq_sync = 1;
17659         tp->pcierr_recovery = false;
17660
17661         if (tg3_debug > 0)
17662                 tp->msg_enable = tg3_debug;
17663         else
17664                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17665
17666         if (pdev_is_ssb_gige_core(pdev)) {
17667                 tg3_flag_set(tp, IS_SSB_CORE);
17668                 if (ssb_gige_must_flush_posted_writes(pdev))
17669                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17670                 if (ssb_gige_one_dma_at_once(pdev))
17671                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17672                 if (ssb_gige_have_roboswitch(pdev)) {
17673                         tg3_flag_set(tp, USE_PHYLIB);
17674                         tg3_flag_set(tp, ROBOSWITCH);
17675                 }
17676                 if (ssb_gige_is_rgmii(pdev))
17677                         tg3_flag_set(tp, RGMII_MODE);
17678         }
17679
17680         /* The word/byte swap controls here control register access byte
17681          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17682          * setting below.
17683          */
17684         tp->misc_host_ctrl =
17685                 MISC_HOST_CTRL_MASK_PCI_INT |
17686                 MISC_HOST_CTRL_WORD_SWAP |
17687                 MISC_HOST_CTRL_INDIR_ACCESS |
17688                 MISC_HOST_CTRL_PCISTATE_RW;
17689
17690         /* The NONFRM (non-frame) byte/word swap controls take effect
17691          * on descriptor entries, anything which isn't packet data.
17692          *
17693          * The StrongARM chips on the board (one for tx, one for rx)
17694          * are running in big-endian mode.
17695          */
17696         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17697                         GRC_MODE_WSWAP_NONFRM_DATA);
17698 #ifdef __BIG_ENDIAN
17699         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17700 #endif
17701         spin_lock_init(&tp->lock);
17702         spin_lock_init(&tp->indirect_lock);
17703         INIT_WORK(&tp->reset_task, tg3_reset_task);
17704
17705         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17706         if (!tp->regs) {
17707                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17708                 err = -ENOMEM;
17709                 goto err_out_free_dev;
17710         }
17711
17712         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17713             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17714             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17715             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17716             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17717             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17718             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17719             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17720             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17721             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17722             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17723             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17724             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17725             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17726             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17727                 tg3_flag_set(tp, ENABLE_APE);
17728                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17729                 if (!tp->aperegs) {
17730                         dev_err(&pdev->dev,
17731                                 "Cannot map APE registers, aborting\n");
17732                         err = -ENOMEM;
17733                         goto err_out_iounmap;
17734                 }
17735         }
17736
17737         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17738         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17739
17740         dev->ethtool_ops = &tg3_ethtool_ops;
17741         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17742         dev->netdev_ops = &tg3_netdev_ops;
17743         dev->irq = pdev->irq;
17744
17745         err = tg3_get_invariants(tp, ent);
17746         if (err) {
17747                 dev_err(&pdev->dev,
17748                         "Problem fetching invariants of chip, aborting\n");
17749                 goto err_out_apeunmap;
17750         }
17751
17752         /* The EPB bridge inside 5714, 5715, and 5780 and any
17753          * device behind the EPB cannot support DMA addresses > 40-bit.
17754          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17755          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17756          * do DMA address check in tg3_start_xmit().
17757          */
17758         if (tg3_flag(tp, IS_5788))
17759                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17760         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17761                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17762 #ifdef CONFIG_HIGHMEM
17763                 dma_mask = DMA_BIT_MASK(64);
17764 #endif
17765         } else
17766                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17767
17768         /* Configure DMA attributes. */
17769         if (dma_mask > DMA_BIT_MASK(32)) {
17770                 err = pci_set_dma_mask(pdev, dma_mask);
17771                 if (!err) {
17772                         features |= NETIF_F_HIGHDMA;
17773                         err = pci_set_consistent_dma_mask(pdev,
17774                                                           persist_dma_mask);
17775                         if (err < 0) {
17776                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17777                                         "DMA for consistent allocations\n");
17778                                 goto err_out_apeunmap;
17779                         }
17780                 }
17781         }
17782         if (err || dma_mask == DMA_BIT_MASK(32)) {
17783                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17784                 if (err) {
17785                         dev_err(&pdev->dev,
17786                                 "No usable DMA configuration, aborting\n");
17787                         goto err_out_apeunmap;
17788                 }
17789         }
17790
17791         tg3_init_bufmgr_config(tp);
17792
17793         /* 5700 B0 chips do not support checksumming correctly due
17794          * to hardware bugs.
17795          */
17796         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17797                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17798
17799                 if (tg3_flag(tp, 5755_PLUS))
17800                         features |= NETIF_F_IPV6_CSUM;
17801         }
17802
17803         /* TSO is on by default on chips that support hardware TSO.
17804          * Firmware TSO on older chips gives lower performance, so it
17805          * is off by default, but can be enabled using ethtool.
17806          */
17807         if ((tg3_flag(tp, HW_TSO_1) ||
17808              tg3_flag(tp, HW_TSO_2) ||
17809              tg3_flag(tp, HW_TSO_3)) &&
17810             (features & NETIF_F_IP_CSUM))
17811                 features |= NETIF_F_TSO;
17812         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17813                 if (features & NETIF_F_IPV6_CSUM)
17814                         features |= NETIF_F_TSO6;
17815                 if (tg3_flag(tp, HW_TSO_3) ||
17816                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17817                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17818                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17819                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17820                     tg3_asic_rev(tp) == ASIC_REV_57780)
17821                         features |= NETIF_F_TSO_ECN;
17822         }
17823
17824         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17825                          NETIF_F_HW_VLAN_CTAG_RX;
17826         dev->vlan_features |= features;
17827
17828         /*
17829          * Add loopback capability only for a subset of devices that support
17830          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17831          * loopback for the remaining devices.
17832          */
17833         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17834             !tg3_flag(tp, CPMU_PRESENT))
17835                 /* Add the loopback capability */
17836                 features |= NETIF_F_LOOPBACK;
17837
17838         dev->hw_features |= features;
17839         dev->priv_flags |= IFF_UNICAST_FLT;
17840
17841         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17842         dev->min_mtu = TG3_MIN_MTU;
17843         dev->max_mtu = TG3_MAX_MTU(tp);
17844
17845         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17846             !tg3_flag(tp, TSO_CAPABLE) &&
17847             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17848                 tg3_flag_set(tp, MAX_RXPEND_64);
17849                 tp->rx_pending = 63;
17850         }
17851
17852         err = tg3_get_device_address(tp);
17853         if (err) {
17854                 dev_err(&pdev->dev,
17855                         "Could not obtain valid ethernet address, aborting\n");
17856                 goto err_out_apeunmap;
17857         }
17858
17859         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17860         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17861         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17862         for (i = 0; i < tp->irq_max; i++) {
17863                 struct tg3_napi *tnapi = &tp->napi[i];
17864
17865                 tnapi->tp = tp;
17866                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17867
17868                 tnapi->int_mbox = intmbx;
17869                 if (i <= 4)
17870                         intmbx += 0x8;
17871                 else
17872                         intmbx += 0x4;
17873
17874                 tnapi->consmbox = rcvmbx;
17875                 tnapi->prodmbox = sndmbx;
17876
17877                 if (i)
17878                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17879                 else
17880                         tnapi->coal_now = HOSTCC_MODE_NOW;
17881
17882                 if (!tg3_flag(tp, SUPPORT_MSIX))
17883                         break;
17884
17885                 /*
17886                  * If we support MSIX, we'll be using RSS.  If we're using
17887                  * RSS, the first vector only handles link interrupts and the
17888                  * remaining vectors handle rx and tx interrupts.  Reuse the
17889                  * mailbox values for the next iteration.  The values we setup
17890                  * above are still useful for the single vectored mode.
17891                  */
17892                 if (!i)
17893                         continue;
17894
17895                 rcvmbx += 0x8;
17896
17897                 if (sndmbx & 0x4)
17898                         sndmbx -= 0x4;
17899                 else
17900                         sndmbx += 0xc;
17901         }
17902
17903         /*
17904          * Reset chip in case UNDI or EFI driver did not shutdown
17905          * DMA self test will enable WDMAC and we'll see (spurious)
17906          * pending DMA on the PCI bus at that point.
17907          */
17908         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17909             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17910                 tg3_full_lock(tp, 0);
17911                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17912                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17913                 tg3_full_unlock(tp);
17914         }
17915
17916         err = tg3_test_dma(tp);
17917         if (err) {
17918                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17919                 goto err_out_apeunmap;
17920         }
17921
17922         tg3_init_coal(tp);
17923
17924         pci_set_drvdata(pdev, dev);
17925
17926         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17927             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17928             tg3_asic_rev(tp) == ASIC_REV_5762)
17929                 tg3_flag_set(tp, PTP_CAPABLE);
17930
17931         tg3_timer_init(tp);
17932
17933         tg3_carrier_off(tp);
17934
17935         err = register_netdev(dev);
17936         if (err) {
17937                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17938                 goto err_out_apeunmap;
17939         }
17940
17941         if (tg3_flag(tp, PTP_CAPABLE)) {
17942                 tg3_ptp_init(tp);
17943                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17944                                                    &tp->pdev->dev);
17945                 if (IS_ERR(tp->ptp_clock))
17946                         tp->ptp_clock = NULL;
17947         }
17948
17949         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17950                     tp->board_part_number,
17951                     tg3_chip_rev_id(tp),
17952                     tg3_bus_string(tp, str),
17953                     dev->dev_addr);
17954
17955         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17956                 char *ethtype;
17957
17958                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17959                         ethtype = "10/100Base-TX";
17960                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17961                         ethtype = "1000Base-SX";
17962                 else
17963                         ethtype = "10/100/1000Base-T";
17964
17965                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17966                             "(WireSpeed[%d], EEE[%d])\n",
17967                             tg3_phy_string(tp), ethtype,
17968                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17969                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17970         }
17971
17972         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17973                     (dev->features & NETIF_F_RXCSUM) != 0,
17974                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17975                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17976                     tg3_flag(tp, ENABLE_ASF) != 0,
17977                     tg3_flag(tp, TSO_CAPABLE) != 0);
17978         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17979                     tp->dma_rwctrl,
17980                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17981                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17982
17983         pci_save_state(pdev);
17984
17985         return 0;
17986
17987 err_out_apeunmap:
17988         if (tp->aperegs) {
17989                 iounmap(tp->aperegs);
17990                 tp->aperegs = NULL;
17991         }
17992
17993 err_out_iounmap:
17994         if (tp->regs) {
17995                 iounmap(tp->regs);
17996                 tp->regs = NULL;
17997         }
17998
17999 err_out_free_dev:
18000         free_netdev(dev);
18001
18002 err_out_free_res:
18003         pci_release_regions(pdev);
18004
18005 err_out_disable_pdev:
18006         if (pci_is_enabled(pdev))
18007                 pci_disable_device(pdev);
18008         return err;
18009 }
18010
/* PCI remove callback: tear down everything tg3_init_one() set up.
 * Order matters: PTP, firmware and the reset task are quiesced before
 * the netdev is unregistered, and the MMIO regions are unmapped only
 * once nothing can touch the hardware any more.
 */
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		/* release_firmware(NULL) is a no-op, so this is safe even
		 * if no firmware was ever requested.
		 */
		release_firmware(tp->fw);

		/* Make sure a queued reset task cannot run during teardown. */
		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		/* NULL the mapping pointers after iounmap() so any stale
		 * reference fails fast instead of touching unmapped I/O.
		 */
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
18043
18044 #ifdef CONFIG_PM_SLEEP
/* PM suspend callback: quiesce the interface and prepare the chip for
 * power-down.  The queues, timer and interrupts are stopped and the
 * chip halted under the full lock; if tg3_power_down_prepare() fails,
 * the hardware is restarted so the device remains usable, but the
 * original error is still returned.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		goto unlock;

	/* Make sure a queued reset task cannot run while we tear down. */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* NOTE(review): second argument of tg3_full_lock() is nonzero
	 * here, unlike everywhere else in this path — presumably it
	 * requests IRQ synchronization before disabling interrupts;
	 * confirm against tg3_full_lock()'s definition.
	 */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		/* Power-down preparation failed: bring the device back
		 * up so it stays functional.  err keeps the original
		 * failure; err2 tracks the recovery attempt.
		 */
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart happens outside the full lock, matching
		 * the ordering used by tg3_resume().
		 */
		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}
18101
/* PM resume callback: re-initialize the hardware and restart the
 * interface that tg3_suspend() quiesced.  Mirrors the suspend path in
 * reverse: attach, restart hw + timer + queues under the full lock,
 * then start the PHY outside the lock.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	/* Interface was down at suspend time; nothing to restart. */
	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	/* Tell the APE firmware (if present) we are initializing. */
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	/* NOTE(review): the boolean arg presumably controls a PHY reset
	 * — skipped when the link was kept up across power-down
	 * (TG3_PHYFLG_KEEP_LINK_ON_PWRDN); confirm vs tg3_restart_hw().
	 */
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY start is done outside the full lock, only on success. */
	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
18140 #endif /* CONFIG_PM_SLEEP */
18141
/* System-sleep PM ops; the callbacks are only compiled (and only used
 * by this macro) under CONFIG_PM_SLEEP.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18143
/* PCI shutdown callback: stop traffic before reboot/kexec and, only on
 * an actual power-off, put the chip into its low-power state.
 */
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	/* Power the chip down only when the machine is going away;
	 * skip it otherwise (e.g. reboot).
	 */
	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}
18160
18161 /**
18162  * tg3_io_error_detected - called when PCI error is detected
18163  * @pdev: Pointer to PCI device
18164  * @state: The current pci connection state
18165  *
18166  * This function is called after a PCI bus error affecting
18167  * this device has been detected.
18168  */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	/* Default answer: ask the PCI core to reset the slot, after
	 * which tg3_io_slot_reset() takes over.
	 */
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* A frozen channel blocks MMIO; flag that we are mid-recovery
	 * so other paths avoid the hardware until tg3_io_resume()
	 * clears tp->pcierr_recovery.
	 */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		/* Permanent failure: no recovery is possible, so close
		 * the interface.  NAPI is re-enabled first — presumably
		 * it was disabled in the stop path above and dev_close()
		 * needs it active; confirm against tg3_netif_stop().
		 */
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}
18219
18220 /**
18221  * tg3_io_slot_reset - called after the pci bus has been reset.
18222  * @pdev: Pointer to PCI device
18223  *
18224  * Restart the card from scratch, as if from a cold-boot.
18225  * At this point, the card has exprienced a hard reset,
18226  * followed by fixups by BIOS, and has its config space
18227  * set up identically to what it was at cold boot.
18228  */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	/* Pessimistic default; upgraded to RECOVERED on success. */
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	/* Restore the config space saved earlier (pci_save_state() at
	 * the end of probe), then re-save so future restores start from
	 * this known-good state.
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* If the interface is down, re-enabling the device suffices. */
	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		/* Recovery failed on a running interface: close it.
		 * NAPI is re-enabled first, matching the pattern in
		 * tg3_io_error_detected().
		 */
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
18268
18269 /**
18270  * tg3_io_resume - called when traffic can start flowing again.
18271  * @pdev: Pointer to PCI device
18272  *
18273  * This callback is called when the error recovery driver tells
18274  * us that its OK to resume normal operation.
18275  */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	/* Re-init sequence mirrors tg3_resume(): notify APE firmware,
	 * mark init complete, restart hw + timer + queues under the
	 * full lock.
	 */
	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* PHY start happens outside the full lock, as in tg3_resume(). */
	tg3_phy_start(tp);

done:
	/* Error recovery is over, whether or not it succeeded. */
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
18311
/* PCI AER recovery callbacks; see the tg3_io_* handlers above. */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected = tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
18317
/* Top-level PCI driver glue.  module_pci_driver() expands to the
 * module init/exit functions that register and unregister the driver.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);