2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
13 * Derived from proprietary unpublished source code,
14 * Copyright (C) 2000-2016 Broadcom Corporation.
15 * Copyright (C) 2016-2017 Broadcom Ltd.
16 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17 * refers to Broadcom Inc. and/or its subsidiaries.
19 * Permission is hereby granted for the distribution of this firmware
20 * data in hexadecimal or equivalent format, provided this copyright
21 * notice is accompanying it.
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
47 #include <linux/if_vlan.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
59 #include <net/checksum.h>
63 #include <asm/byteorder.h>
64 #include <linux/uaccess.h>
66 #include <uapi/linux/net_tstamp.h>
67 #include <linux/ptp_clock_kernel.h>
74 /* Functions & macros to verify TG3_FLAGS types */
76 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
78 return test_bit(flag, bits);
81 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
86 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
88 clear_bit(flag, bits);
91 #define tg3_flag(tp, flag) \
92 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define tg3_flag_set(tp, flag) \
94 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_clear(tp, flag) \
96 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
98 #define DRV_MODULE_NAME "tg3"
100 #define TG3_MIN_NUM 137
101 #define DRV_MODULE_VERSION \
102 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
103 #define DRV_MODULE_RELDATE "May 11, 2014"
105 #define RESET_KIND_SHUTDOWN 0
106 #define RESET_KIND_INIT 1
107 #define RESET_KIND_SUSPEND 2
109 #define TG3_DEF_RX_MODE 0
110 #define TG3_DEF_TX_MODE 0
111 #define TG3_DEF_MSG_ENABLE \
121 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
123 /* length of time before we decide the hardware is borked,
124 * and dev->tx_timeout() should be called to fix the problem
127 #define TG3_TX_TIMEOUT (5 * HZ)
129 /* hardware minimum and maximum for a single frame's data payload */
130 #define TG3_MIN_MTU ETH_ZLEN
131 #define TG3_MAX_MTU(tp) \
132 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
134 /* These numbers seem to be hard coded in the NIC firmware somehow.
135 * You can't change the ring sizes, but you can change where you place
136 * them in the NIC onboard memory.
138 #define TG3_RX_STD_RING_SIZE(tp) \
139 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
141 #define TG3_DEF_RX_RING_PENDING 200
142 #define TG3_RX_JMB_RING_SIZE(tp) \
143 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
144 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
145 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
147 /* Do not place this n-ring entries value into the tp struct itself,
148 * we really want to expose these constants to GCC so that modulo et
149 * al. operations are done with shifts and masks instead of with
150 * hw multiply/modulo instructions. Another solution would be to
151 * replace things like '% foo' with '& (foo - 1)'.
154 #define TG3_TX_RING_SIZE 512
155 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
157 #define TG3_RX_STD_RING_BYTES(tp) \
158 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
159 #define TG3_RX_JMB_RING_BYTES(tp) \
160 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
161 #define TG3_RX_RCB_RING_BYTES(tp) \
162 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
163 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
165 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
167 #define TG3_DMA_BYTE_ENAB 64
169 #define TG3_RX_STD_DMA_SZ 1536
170 #define TG3_RX_JMB_DMA_SZ 9046
172 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
174 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
175 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
177 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
180 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
181 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
183 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
184 * that are at least dword aligned when used in PCIX mode. The driver
185 * works around this bug by double copying the packet. This workaround
186 * is built into the normal double copy length check for efficiency.
188 * However, the double copy is only necessary on those architectures
189 * where unaligned memory accesses are inefficient. For those architectures
190 * where unaligned memory accesses incur little penalty, we can reintegrate
191 * the 5701 in the normal rx path. Doing so saves a device structure
192 * dereference by hardcoding the double copy threshold in place.
194 #define TG3_RX_COPY_THRESHOLD 256
195 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
196 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
198 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
201 #if (NET_IP_ALIGN != 0)
202 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
204 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
207 /* minimum number of free TX descriptors required to wake up TX process */
208 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
209 #define TG3_TX_BD_DMA_MAX_2K 2048
210 #define TG3_TX_BD_DMA_MAX_4K 4096
212 #define TG3_RAW_IP_ALIGN 2
214 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
215 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
217 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
218 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
220 #define FIRMWARE_TG3 "tigon/tg3.bin"
221 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
222 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
223 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
225 static char version[] =
226 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
228 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
229 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
230 MODULE_LICENSE("GPL");
231 MODULE_VERSION(DRV_MODULE_VERSION);
232 MODULE_FIRMWARE(FIRMWARE_TG3);
233 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
234 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
236 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
237 module_param(tg3_debug, int, 0);
238 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
240 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
241 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
243 static const struct pci_device_id tg3_pci_tbl[] = {
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
263 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 TG3_DRV_DATA_FLAG_5705_10_100},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
266 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
267 TG3_DRV_DATA_FLAG_5705_10_100},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
270 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
271 TG3_DRV_DATA_FLAG_5705_10_100},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
284 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
292 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
293 PCI_VENDOR_ID_LENOVO,
294 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
295 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
298 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
314 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
317 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
318 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
319 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
321 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
322 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
326 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
336 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
338 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
340 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
341 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
342 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
343 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
344 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
345 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
346 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
347 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
348 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
349 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
350 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
351 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
352 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
353 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
354 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
355 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
356 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
357 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
358 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
362 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
364 static const struct {
365 const char string[ETH_GSTRING_LEN];
366 } ethtool_stats_keys[] = {
369 { "rx_ucast_packets" },
370 { "rx_mcast_packets" },
371 { "rx_bcast_packets" },
373 { "rx_align_errors" },
374 { "rx_xon_pause_rcvd" },
375 { "rx_xoff_pause_rcvd" },
376 { "rx_mac_ctrl_rcvd" },
377 { "rx_xoff_entered" },
378 { "rx_frame_too_long_errors" },
380 { "rx_undersize_packets" },
381 { "rx_in_length_errors" },
382 { "rx_out_length_errors" },
383 { "rx_64_or_less_octet_packets" },
384 { "rx_65_to_127_octet_packets" },
385 { "rx_128_to_255_octet_packets" },
386 { "rx_256_to_511_octet_packets" },
387 { "rx_512_to_1023_octet_packets" },
388 { "rx_1024_to_1522_octet_packets" },
389 { "rx_1523_to_2047_octet_packets" },
390 { "rx_2048_to_4095_octet_packets" },
391 { "rx_4096_to_8191_octet_packets" },
392 { "rx_8192_to_9022_octet_packets" },
399 { "tx_flow_control" },
401 { "tx_single_collisions" },
402 { "tx_mult_collisions" },
404 { "tx_excessive_collisions" },
405 { "tx_late_collisions" },
406 { "tx_collide_2times" },
407 { "tx_collide_3times" },
408 { "tx_collide_4times" },
409 { "tx_collide_5times" },
410 { "tx_collide_6times" },
411 { "tx_collide_7times" },
412 { "tx_collide_8times" },
413 { "tx_collide_9times" },
414 { "tx_collide_10times" },
415 { "tx_collide_11times" },
416 { "tx_collide_12times" },
417 { "tx_collide_13times" },
418 { "tx_collide_14times" },
419 { "tx_collide_15times" },
420 { "tx_ucast_packets" },
421 { "tx_mcast_packets" },
422 { "tx_bcast_packets" },
423 { "tx_carrier_sense_errors" },
427 { "dma_writeq_full" },
428 { "dma_write_prioq_full" },
432 { "rx_threshold_hit" },
434 { "dma_readq_full" },
435 { "dma_read_prioq_full" },
436 { "tx_comp_queue_full" },
438 { "ring_set_send_prod_index" },
439 { "ring_status_update" },
441 { "nic_avoided_irqs" },
442 { "nic_tx_threshold_hit" },
444 { "mbuf_lwm_thresh_hit" },
447 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
448 #define TG3_NVRAM_TEST 0
449 #define TG3_LINK_TEST 1
450 #define TG3_REGISTER_TEST 2
451 #define TG3_MEMORY_TEST 3
452 #define TG3_MAC_LOOPB_TEST 4
453 #define TG3_PHY_LOOPB_TEST 5
454 #define TG3_EXT_LOOPB_TEST 6
455 #define TG3_INTERRUPT_TEST 7
458 static const struct {
459 const char string[ETH_GSTRING_LEN];
460 } ethtool_test_keys[] = {
461 [TG3_NVRAM_TEST] = { "nvram test (online) " },
462 [TG3_LINK_TEST] = { "link test (online) " },
463 [TG3_REGISTER_TEST] = { "register test (offline)" },
464 [TG3_MEMORY_TEST] = { "memory test (offline)" },
465 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
466 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
467 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
468 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
471 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
474 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
476 writel(val, tp->regs + off);
479 static u32 tg3_read32(struct tg3 *tp, u32 off)
481 return readl(tp->regs + off);
484 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
486 writel(val, tp->aperegs + off);
489 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
491 return readl(tp->aperegs + off);
494 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
498 spin_lock_irqsave(&tp->indirect_lock, flags);
499 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
500 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
501 spin_unlock_irqrestore(&tp->indirect_lock, flags);
504 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
506 writel(val, tp->regs + off);
507 readl(tp->regs + off);
510 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
515 spin_lock_irqsave(&tp->indirect_lock, flags);
516 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
517 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
518 spin_unlock_irqrestore(&tp->indirect_lock, flags);
522 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
526 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
527 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
528 TG3_64BIT_REG_LOW, val);
531 if (off == TG3_RX_STD_PROD_IDX_REG) {
532 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
533 TG3_64BIT_REG_LOW, val);
537 spin_lock_irqsave(&tp->indirect_lock, flags);
538 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
539 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
540 spin_unlock_irqrestore(&tp->indirect_lock, flags);
542 /* In indirect mode when disabling interrupts, we also need
543 * to clear the interrupt bit in the GRC local ctrl register.
545 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
547 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
548 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
552 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
557 spin_lock_irqsave(&tp->indirect_lock, flags);
558 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
559 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
560 spin_unlock_irqrestore(&tp->indirect_lock, flags);
564 /* usec_wait specifies the wait time in usec when writing to certain registers
565 * where it is unsafe to read back the register without some delay.
566 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
567 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
569 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
571 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
572 /* Non-posted methods */
573 tp->write32(tp, off, val);
576 tg3_write32(tp, off, val);
581 /* Wait again after the read for the posted method to guarantee that
582 * the wait time is met.
588 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
590 tp->write32_mbox(tp, off, val);
591 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
592 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
593 !tg3_flag(tp, ICH_WORKAROUND)))
594 tp->read32_mbox(tp, off);
597 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
599 void __iomem *mbox = tp->regs + off;
601 if (tg3_flag(tp, TXD_MBOX_HWBUG))
603 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
604 tg3_flag(tp, FLUSH_POSTED_WRITES))
608 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
610 return readl(tp->regs + off + GRCMBOX_BASE);
613 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
615 writel(val, tp->regs + off + GRCMBOX_BASE);
618 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
619 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
620 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
621 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
622 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
624 #define tw32(reg, val) tp->write32(tp, reg, val)
625 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
626 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
627 #define tr32(reg) tp->read32(tp, reg)
629 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
633 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
634 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
637 spin_lock_irqsave(&tp->indirect_lock, flags);
638 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
639 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
640 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
642 /* Always leave this as zero. */
643 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
645 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
646 tw32_f(TG3PCI_MEM_WIN_DATA, val);
648 /* Always leave this as zero. */
649 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
651 spin_unlock_irqrestore(&tp->indirect_lock, flags);
654 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
658 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
659 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
664 spin_lock_irqsave(&tp->indirect_lock, flags);
665 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
666 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
667 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
669 /* Always leave this as zero. */
670 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
672 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
673 *val = tr32(TG3PCI_MEM_WIN_DATA);
675 /* Always leave this as zero. */
676 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
678 spin_unlock_irqrestore(&tp->indirect_lock, flags);
681 static void tg3_ape_lock_init(struct tg3 *tp)
686 if (tg3_asic_rev(tp) == ASIC_REV_5761)
687 regbase = TG3_APE_LOCK_GRANT;
689 regbase = TG3_APE_PER_LOCK_GRANT;
691 /* Make sure the driver hasn't any stale locks. */
692 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
694 case TG3_APE_LOCK_PHY0:
695 case TG3_APE_LOCK_PHY1:
696 case TG3_APE_LOCK_PHY2:
697 case TG3_APE_LOCK_PHY3:
698 bit = APE_LOCK_GRANT_DRIVER;
702 bit = APE_LOCK_GRANT_DRIVER;
704 bit = 1 << tp->pci_fn;
706 tg3_ape_write32(tp, regbase + 4 * i, bit);
711 static int tg3_ape_lock(struct tg3 *tp, int locknum)
715 u32 status, req, gnt, bit;
717 if (!tg3_flag(tp, ENABLE_APE))
721 case TG3_APE_LOCK_GPIO:
722 if (tg3_asic_rev(tp) == ASIC_REV_5761)
725 case TG3_APE_LOCK_GRC:
726 case TG3_APE_LOCK_MEM:
728 bit = APE_LOCK_REQ_DRIVER;
730 bit = 1 << tp->pci_fn;
732 case TG3_APE_LOCK_PHY0:
733 case TG3_APE_LOCK_PHY1:
734 case TG3_APE_LOCK_PHY2:
735 case TG3_APE_LOCK_PHY3:
736 bit = APE_LOCK_REQ_DRIVER;
742 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
743 req = TG3_APE_LOCK_REQ;
744 gnt = TG3_APE_LOCK_GRANT;
746 req = TG3_APE_PER_LOCK_REQ;
747 gnt = TG3_APE_PER_LOCK_GRANT;
752 tg3_ape_write32(tp, req + off, bit);
754 /* Wait for up to 1 millisecond to acquire lock. */
755 for (i = 0; i < 100; i++) {
756 status = tg3_ape_read32(tp, gnt + off);
759 if (pci_channel_offline(tp->pdev))
766 /* Revoke the lock request. */
767 tg3_ape_write32(tp, gnt + off, bit);
774 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
778 if (!tg3_flag(tp, ENABLE_APE))
782 case TG3_APE_LOCK_GPIO:
783 if (tg3_asic_rev(tp) == ASIC_REV_5761)
786 case TG3_APE_LOCK_GRC:
787 case TG3_APE_LOCK_MEM:
789 bit = APE_LOCK_GRANT_DRIVER;
791 bit = 1 << tp->pci_fn;
793 case TG3_APE_LOCK_PHY0:
794 case TG3_APE_LOCK_PHY1:
795 case TG3_APE_LOCK_PHY2:
796 case TG3_APE_LOCK_PHY3:
797 bit = APE_LOCK_GRANT_DRIVER;
803 if (tg3_asic_rev(tp) == ASIC_REV_5761)
804 gnt = TG3_APE_LOCK_GRANT;
806 gnt = TG3_APE_PER_LOCK_GRANT;
808 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
811 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
816 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
819 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
820 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
823 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
826 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
829 return timeout_us ? 0 : -EBUSY;
832 #ifdef CONFIG_TIGON3_HWMON
833 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
837 for (i = 0; i < timeout_us / 10; i++) {
838 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
840 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
846 return i == timeout_us / 10;
849 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
853 u32 i, bufoff, msgoff, maxlen, apedata;
855 if (!tg3_flag(tp, APE_HAS_NCSI))
858 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
859 if (apedata != APE_SEG_SIG_MAGIC)
862 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
863 if (!(apedata & APE_FW_STATUS_READY))
866 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
868 msgoff = bufoff + 2 * sizeof(u32);
869 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
874 /* Cap xfer sizes to scratchpad limits. */
875 length = (len > maxlen) ? maxlen : len;
878 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
879 if (!(apedata & APE_FW_STATUS_READY))
882 /* Wait for up to 1 msec for APE to service previous event. */
883 err = tg3_ape_event_lock(tp, 1000);
887 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
888 APE_EVENT_STATUS_SCRTCHPD_READ |
889 APE_EVENT_STATUS_EVENT_PENDING;
890 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
892 tg3_ape_write32(tp, bufoff, base_off);
893 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
895 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
896 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
900 if (tg3_ape_wait_for_event(tp, 30000))
903 for (i = 0; length; i += 4, length -= 4) {
904 u32 val = tg3_ape_read32(tp, msgoff + i);
905 memcpy(data, &val, sizeof(u32));
914 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
919 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
920 if (apedata != APE_SEG_SIG_MAGIC)
923 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
924 if (!(apedata & APE_FW_STATUS_READY))
927 /* Wait for up to 20 millisecond for APE to service previous event. */
928 err = tg3_ape_event_lock(tp, 20000);
932 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
933 event | APE_EVENT_STATUS_EVENT_PENDING);
935 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
936 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
941 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
946 if (!tg3_flag(tp, ENABLE_APE))
950 case RESET_KIND_INIT:
951 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
952 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
953 APE_HOST_SEG_SIG_MAGIC);
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
955 APE_HOST_SEG_LEN_MAGIC);
956 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
957 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
958 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
959 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
960 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
961 APE_HOST_BEHAV_NO_PHYLOCK);
962 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
963 TG3_APE_HOST_DRVR_STATE_START);
965 event = APE_EVENT_STATUS_STATE_START;
967 case RESET_KIND_SHUTDOWN:
968 if (device_may_wakeup(&tp->pdev->dev) &&
969 tg3_flag(tp, WOL_ENABLE)) {
970 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
971 TG3_APE_HOST_WOL_SPEED_AUTO);
972 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
974 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
976 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
978 event = APE_EVENT_STATUS_STATE_UNLOAD;
984 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
986 tg3_ape_send_event(tp, event);
989 static void tg3_send_ape_heartbeat(struct tg3 *tp,
990 unsigned long interval)
992 /* Check if hb interval has exceeded */
993 if (!tg3_flag(tp, ENABLE_APE) ||
994 time_before(jiffies, tp->ape_hb_jiffies + interval))
997 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
998 tp->ape_hb_jiffies = jiffies;
1001 static void tg3_disable_ints(struct tg3 *tp)
1005 tw32(TG3PCI_MISC_HOST_CTRL,
1006 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1007 for (i = 0; i < tp->irq_max; i++)
1008 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1011 static void tg3_enable_ints(struct tg3 *tp)
1018 tw32(TG3PCI_MISC_HOST_CTRL,
1019 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1021 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1022 for (i = 0; i < tp->irq_cnt; i++) {
1023 struct tg3_napi *tnapi = &tp->napi[i];
1025 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1026 if (tg3_flag(tp, 1SHOT_MSI))
1027 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1029 tp->coal_now |= tnapi->coal_now;
1032 /* Force an initial interrupt */
1033 if (!tg3_flag(tp, TAGGED_STATUS) &&
1034 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1035 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1037 tw32(HOSTCC_MODE, tp->coal_now);
1039 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1042 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1044 struct tg3 *tp = tnapi->tp;
1045 struct tg3_hw_status *sblk = tnapi->hw_status;
1046 unsigned int work_exists = 0;
1048 /* check for phy events */
1049 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1050 if (sblk->status & SD_STATUS_LINK_CHG)
1054 /* check for TX work to do */
1055 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1058 /* check for RX work to do */
1059 if (tnapi->rx_rcb_prod_idx &&
1060 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1067 * similar to tg3_enable_ints, but it accurately determines whether there
1068 * is new work pending and can return without flushing the PIO write
1069 * which reenables interrupts
1071 static void tg3_int_reenable(struct tg3_napi *tnapi)
1073 struct tg3 *tp = tnapi->tp;
1075 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1077 /* When doing tagged status, this work check is unnecessary.
1078 * The last_tag we write above tells the chip which piece of
1079 * work we've completed.
1081 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1082 tw32(HOSTCC_MODE, tp->coalesce_mode |
1083 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1086 static void tg3_switch_clocks(struct tg3 *tp)
1089 u32 orig_clock_ctrl;
1091 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1094 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1096 orig_clock_ctrl = clock_ctrl;
1097 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1098 CLOCK_CTRL_CLKRUN_OENABLE |
1100 tp->pci_clock_ctrl = clock_ctrl;
1102 if (tg3_flag(tp, 5705_PLUS)) {
1103 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1104 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1105 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1107 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1108 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1110 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1112 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1113 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1116 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1119 #define PHY_BUSY_LOOPS 5000
1121 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1128 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1130 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1134 tg3_ape_lock(tp, tp->phy_ape_lock);
1138 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1139 MI_COM_PHY_ADDR_MASK);
1140 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1141 MI_COM_REG_ADDR_MASK);
1142 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1144 tw32_f(MAC_MI_COM, frame_val);
1146 loops = PHY_BUSY_LOOPS;
1147 while (loops != 0) {
1149 frame_val = tr32(MAC_MI_COM);
1151 if ((frame_val & MI_COM_BUSY) == 0) {
1153 frame_val = tr32(MAC_MI_COM);
1161 *val = frame_val & MI_COM_DATA_MASK;
1165 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1166 tw32_f(MAC_MI_MODE, tp->mi_mode);
1170 tg3_ape_unlock(tp, tp->phy_ape_lock);
1175 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1177 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1180 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1187 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1188 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1191 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1193 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1197 tg3_ape_lock(tp, tp->phy_ape_lock);
1199 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1200 MI_COM_PHY_ADDR_MASK);
1201 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1202 MI_COM_REG_ADDR_MASK);
1203 frame_val |= (val & MI_COM_DATA_MASK);
1204 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1206 tw32_f(MAC_MI_COM, frame_val);
1208 loops = PHY_BUSY_LOOPS;
1209 while (loops != 0) {
1211 frame_val = tr32(MAC_MI_COM);
1212 if ((frame_val & MI_COM_BUSY) == 0) {
1214 frame_val = tr32(MAC_MI_COM);
1224 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1225 tw32_f(MAC_MI_MODE, tp->mi_mode);
1229 tg3_ape_unlock(tp, tp->phy_ape_lock);
1234 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1236 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1239 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1243 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1252 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1262 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1266 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1270 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1274 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1275 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1279 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1285 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1289 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1291 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1296 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1300 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1302 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1307 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1311 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1312 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1313 MII_TG3_AUXCTL_SHDWSEL_MISC);
1315 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1320 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1322 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1323 set |= MII_TG3_AUXCTL_MISC_WREN;
1325 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1328 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1333 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1339 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1341 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1343 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1344 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1349 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1351 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1352 reg | val | MII_TG3_MISC_SHDW_WREN);
1355 static int tg3_bmcr_reset(struct tg3 *tp)
1360 /* OK, reset it, and poll the BMCR_RESET bit until it
1361 * clears or we time out.
1363 phy_control = BMCR_RESET;
1364 err = tg3_writephy(tp, MII_BMCR, phy_control);
1370 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1374 if ((phy_control & BMCR_RESET) == 0) {
1386 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1388 struct tg3 *tp = bp->priv;
1391 spin_lock_bh(&tp->lock);
1393 if (__tg3_readphy(tp, mii_id, reg, &val))
1396 spin_unlock_bh(&tp->lock);
1401 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1403 struct tg3 *tp = bp->priv;
1406 spin_lock_bh(&tp->lock);
1408 if (__tg3_writephy(tp, mii_id, reg, val))
1411 spin_unlock_bh(&tp->lock);
1416 static void tg3_mdio_config_5785(struct tg3 *tp)
1419 struct phy_device *phydev;
1421 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1422 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1423 case PHY_ID_BCM50610:
1424 case PHY_ID_BCM50610M:
1425 val = MAC_PHYCFG2_50610_LED_MODES;
1427 case PHY_ID_BCMAC131:
1428 val = MAC_PHYCFG2_AC131_LED_MODES;
1430 case PHY_ID_RTL8211C:
1431 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1433 case PHY_ID_RTL8201E:
1434 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1440 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1441 tw32(MAC_PHYCFG2, val);
1443 val = tr32(MAC_PHYCFG1);
1444 val &= ~(MAC_PHYCFG1_RGMII_INT |
1445 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1446 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1447 tw32(MAC_PHYCFG1, val);
1452 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1453 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1454 MAC_PHYCFG2_FMODE_MASK_MASK |
1455 MAC_PHYCFG2_GMODE_MASK_MASK |
1456 MAC_PHYCFG2_ACT_MASK_MASK |
1457 MAC_PHYCFG2_QUAL_MASK_MASK |
1458 MAC_PHYCFG2_INBAND_ENABLE;
1460 tw32(MAC_PHYCFG2, val);
1462 val = tr32(MAC_PHYCFG1);
1463 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1464 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1465 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1466 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1467 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1468 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1471 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1472 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1473 tw32(MAC_PHYCFG1, val);
1475 val = tr32(MAC_EXT_RGMII_MODE);
1476 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1477 MAC_RGMII_MODE_RX_QUALITY |
1478 MAC_RGMII_MODE_RX_ACTIVITY |
1479 MAC_RGMII_MODE_RX_ENG_DET |
1480 MAC_RGMII_MODE_TX_ENABLE |
1481 MAC_RGMII_MODE_TX_LOWPWR |
1482 MAC_RGMII_MODE_TX_RESET);
1483 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1484 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1485 val |= MAC_RGMII_MODE_RX_INT_B |
1486 MAC_RGMII_MODE_RX_QUALITY |
1487 MAC_RGMII_MODE_RX_ACTIVITY |
1488 MAC_RGMII_MODE_RX_ENG_DET;
1489 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1490 val |= MAC_RGMII_MODE_TX_ENABLE |
1491 MAC_RGMII_MODE_TX_LOWPWR |
1492 MAC_RGMII_MODE_TX_RESET;
1494 tw32(MAC_EXT_RGMII_MODE, val);
1497 static void tg3_mdio_start(struct tg3 *tp)
1499 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1500 tw32_f(MAC_MI_MODE, tp->mi_mode);
1503 if (tg3_flag(tp, MDIOBUS_INITED) &&
1504 tg3_asic_rev(tp) == ASIC_REV_5785)
1505 tg3_mdio_config_5785(tp);
1508 static int tg3_mdio_init(struct tg3 *tp)
1512 struct phy_device *phydev;
1514 if (tg3_flag(tp, 5717_PLUS)) {
1517 tp->phy_addr = tp->pci_fn + 1;
1519 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1520 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1522 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1523 TG3_CPMU_PHY_STRAP_IS_SERDES;
1526 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1529 addr = ssb_gige_get_phyaddr(tp->pdev);
1532 tp->phy_addr = addr;
1534 tp->phy_addr = TG3_PHY_MII_ADDR;
1538 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1541 tp->mdio_bus = mdiobus_alloc();
1542 if (tp->mdio_bus == NULL)
1545 tp->mdio_bus->name = "tg3 mdio bus";
1546 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1547 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1548 tp->mdio_bus->priv = tp;
1549 tp->mdio_bus->parent = &tp->pdev->dev;
1550 tp->mdio_bus->read = &tg3_mdio_read;
1551 tp->mdio_bus->write = &tg3_mdio_write;
1552 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1554 /* The bus registration will look for all the PHYs on the mdio bus.
1555 * Unfortunately, it does not ensure the PHY is powered up before
1556 * accessing the PHY ID registers. A chip reset is the
1557 * quickest way to bring the device back to an operational state..
1559 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1562 i = mdiobus_register(tp->mdio_bus);
1564 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1565 mdiobus_free(tp->mdio_bus);
1569 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1571 if (!phydev || !phydev->drv) {
1572 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1573 mdiobus_unregister(tp->mdio_bus);
1574 mdiobus_free(tp->mdio_bus);
1578 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1579 case PHY_ID_BCM57780:
1580 phydev->interface = PHY_INTERFACE_MODE_GMII;
1581 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583 case PHY_ID_BCM50610:
1584 case PHY_ID_BCM50610M:
1585 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1586 PHY_BRCM_RX_REFCLK_UNUSED |
1587 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1588 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1589 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1590 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1591 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1592 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1593 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1594 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1596 case PHY_ID_RTL8211C:
1597 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1599 case PHY_ID_RTL8201E:
1600 case PHY_ID_BCMAC131:
1601 phydev->interface = PHY_INTERFACE_MODE_MII;
1602 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1603 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1607 tg3_flag_set(tp, MDIOBUS_INITED);
1609 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1610 tg3_mdio_config_5785(tp);
1615 static void tg3_mdio_fini(struct tg3 *tp)
1617 if (tg3_flag(tp, MDIOBUS_INITED)) {
1618 tg3_flag_clear(tp, MDIOBUS_INITED);
1619 mdiobus_unregister(tp->mdio_bus);
1620 mdiobus_free(tp->mdio_bus);
1624 /* tp->lock is held. */
1625 static inline void tg3_generate_fw_event(struct tg3 *tp)
1629 val = tr32(GRC_RX_CPU_EVENT);
1630 val |= GRC_RX_CPU_DRIVER_EVENT;
1631 tw32_f(GRC_RX_CPU_EVENT, val);
1633 tp->last_event_jiffies = jiffies;
1636 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1638 /* tp->lock is held. */
1639 static void tg3_wait_for_event_ack(struct tg3 *tp)
1642 unsigned int delay_cnt;
1645 /* If enough time has passed, no wait is necessary. */
1646 time_remain = (long)(tp->last_event_jiffies + 1 +
1647 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1649 if (time_remain < 0)
1652 /* Check if we can shorten the wait time. */
1653 delay_cnt = jiffies_to_usecs(time_remain);
1654 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1655 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1656 delay_cnt = (delay_cnt >> 3) + 1;
1658 for (i = 0; i < delay_cnt; i++) {
1659 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1661 if (pci_channel_offline(tp->pdev))
1668 /* tp->lock is held. */
1669 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1674 if (!tg3_readphy(tp, MII_BMCR, ®))
1676 if (!tg3_readphy(tp, MII_BMSR, ®))
1677 val |= (reg & 0xffff);
1681 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1683 if (!tg3_readphy(tp, MII_LPA, ®))
1684 val |= (reg & 0xffff);
1688 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1689 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1691 if (!tg3_readphy(tp, MII_STAT1000, ®))
1692 val |= (reg & 0xffff);
1696 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1703 /* tp->lock is held. */
1704 static void tg3_ump_link_report(struct tg3 *tp)
1708 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1711 tg3_phy_gather_ump_data(tp, data);
1713 tg3_wait_for_event_ack(tp);
1715 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1716 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1717 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1718 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1719 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1720 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1722 tg3_generate_fw_event(tp);
1725 /* tp->lock is held. */
1726 static void tg3_stop_fw(struct tg3 *tp)
1728 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1729 /* Wait for RX cpu to ACK the previous event. */
1730 tg3_wait_for_event_ack(tp);
1732 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1734 tg3_generate_fw_event(tp);
1736 /* Wait for RX cpu to ACK this event. */
1737 tg3_wait_for_event_ack(tp);
1741 /* tp->lock is held. */
1742 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1744 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1745 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1747 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1749 case RESET_KIND_INIT:
1750 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754 case RESET_KIND_SHUTDOWN:
1755 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759 case RESET_KIND_SUSPEND:
1760 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770 /* tp->lock is held. */
1771 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1773 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1775 case RESET_KIND_INIT:
1776 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 DRV_STATE_START_DONE);
1780 case RESET_KIND_SHUTDOWN:
1781 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 DRV_STATE_UNLOAD_DONE);
1791 /* tp->lock is held. */
1792 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1794 if (tg3_flag(tp, ENABLE_ASF)) {
1796 case RESET_KIND_INIT:
1797 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1801 case RESET_KIND_SHUTDOWN:
1802 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1806 case RESET_KIND_SUSPEND:
1807 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1817 static int tg3_poll_fw(struct tg3 *tp)
1822 if (tg3_flag(tp, NO_FWARE_REPORTED))
1825 if (tg3_flag(tp, IS_SSB_CORE)) {
1826 /* We don't use firmware. */
1830 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1831 /* Wait up to 20ms for init done. */
1832 for (i = 0; i < 200; i++) {
1833 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1835 if (pci_channel_offline(tp->pdev))
1843 /* Wait for firmware initialization to complete. */
1844 for (i = 0; i < 100000; i++) {
1845 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1846 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1848 if (pci_channel_offline(tp->pdev)) {
1849 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1850 tg3_flag_set(tp, NO_FWARE_REPORTED);
1851 netdev_info(tp->dev, "No firmware running\n");
1860 /* Chip might not be fitted with firmware. Some Sun onboard
1861 * parts are configured like that. So don't signal the timeout
1862 * of the above loop as an error, but do report the lack of
1863 * running firmware once.
1865 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1866 tg3_flag_set(tp, NO_FWARE_REPORTED);
1868 netdev_info(tp->dev, "No firmware running\n");
1871 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1872 /* The 57765 A0 needs a little more
1873 * time to do some important work.
1881 static void tg3_link_report(struct tg3 *tp)
1883 if (!netif_carrier_ok(tp->dev)) {
1884 netif_info(tp, link, tp->dev, "Link is down\n");
1885 tg3_ump_link_report(tp);
1886 } else if (netif_msg_link(tp)) {
1887 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1888 (tp->link_config.active_speed == SPEED_1000 ?
1890 (tp->link_config.active_speed == SPEED_100 ?
1892 (tp->link_config.active_duplex == DUPLEX_FULL ?
1895 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1896 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1898 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1901 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1902 netdev_info(tp->dev, "EEE is %s\n",
1903 tp->setlpicnt ? "enabled" : "disabled");
1905 tg3_ump_link_report(tp);
1908 tp->link_up = netif_carrier_ok(tp->dev);
1911 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1915 if (adv & ADVERTISE_PAUSE_CAP) {
1916 flowctrl |= FLOW_CTRL_RX;
1917 if (!(adv & ADVERTISE_PAUSE_ASYM))
1918 flowctrl |= FLOW_CTRL_TX;
1919 } else if (adv & ADVERTISE_PAUSE_ASYM)
1920 flowctrl |= FLOW_CTRL_TX;
1925 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1929 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1930 miireg = ADVERTISE_1000XPAUSE;
1931 else if (flow_ctrl & FLOW_CTRL_TX)
1932 miireg = ADVERTISE_1000XPSE_ASYM;
1933 else if (flow_ctrl & FLOW_CTRL_RX)
1934 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1941 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1945 if (adv & ADVERTISE_1000XPAUSE) {
1946 flowctrl |= FLOW_CTRL_RX;
1947 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1948 flowctrl |= FLOW_CTRL_TX;
1949 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1950 flowctrl |= FLOW_CTRL_TX;
1955 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1959 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1960 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1961 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1962 if (lcladv & ADVERTISE_1000XPAUSE)
1964 if (rmtadv & ADVERTISE_1000XPAUSE)
1971 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1975 u32 old_rx_mode = tp->rx_mode;
1976 u32 old_tx_mode = tp->tx_mode;
1978 if (tg3_flag(tp, USE_PHYLIB))
1979 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1981 autoneg = tp->link_config.autoneg;
1983 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1984 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1985 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1987 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1989 flowctrl = tp->link_config.flowctrl;
1991 tp->link_config.active_flowctrl = flowctrl;
1993 if (flowctrl & FLOW_CTRL_RX)
1994 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1996 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1998 if (old_rx_mode != tp->rx_mode)
1999 tw32_f(MAC_RX_MODE, tp->rx_mode);
2001 if (flowctrl & FLOW_CTRL_TX)
2002 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2004 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2006 if (old_tx_mode != tp->tx_mode)
2007 tw32_f(MAC_TX_MODE, tp->tx_mode);
2010 static void tg3_adjust_link(struct net_device *dev)
2012 u8 oldflowctrl, linkmesg = 0;
2013 u32 mac_mode, lcl_adv, rmt_adv;
2014 struct tg3 *tp = netdev_priv(dev);
2015 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2017 spin_lock_bh(&tp->lock);
2019 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2020 MAC_MODE_HALF_DUPLEX);
2022 oldflowctrl = tp->link_config.active_flowctrl;
2028 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2029 mac_mode |= MAC_MODE_PORT_MODE_MII;
2030 else if (phydev->speed == SPEED_1000 ||
2031 tg3_asic_rev(tp) != ASIC_REV_5785)
2032 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2034 mac_mode |= MAC_MODE_PORT_MODE_MII;
2036 if (phydev->duplex == DUPLEX_HALF)
2037 mac_mode |= MAC_MODE_HALF_DUPLEX;
2039 lcl_adv = mii_advertise_flowctrl(
2040 tp->link_config.flowctrl);
2043 rmt_adv = LPA_PAUSE_CAP;
2044 if (phydev->asym_pause)
2045 rmt_adv |= LPA_PAUSE_ASYM;
2048 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2050 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2052 if (mac_mode != tp->mac_mode) {
2053 tp->mac_mode = mac_mode;
2054 tw32_f(MAC_MODE, tp->mac_mode);
2058 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2059 if (phydev->speed == SPEED_10)
2061 MAC_MI_STAT_10MBPS_MODE |
2062 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2064 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2067 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2068 tw32(MAC_TX_LENGTHS,
2069 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2070 (6 << TX_LENGTHS_IPG_SHIFT) |
2071 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2073 tw32(MAC_TX_LENGTHS,
2074 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2075 (6 << TX_LENGTHS_IPG_SHIFT) |
2076 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2078 if (phydev->link != tp->old_link ||
2079 phydev->speed != tp->link_config.active_speed ||
2080 phydev->duplex != tp->link_config.active_duplex ||
2081 oldflowctrl != tp->link_config.active_flowctrl)
2084 tp->old_link = phydev->link;
2085 tp->link_config.active_speed = phydev->speed;
2086 tp->link_config.active_duplex = phydev->duplex;
2088 spin_unlock_bh(&tp->lock);
2091 tg3_link_report(tp);
2094 static int tg3_phy_init(struct tg3 *tp)
2096 struct phy_device *phydev;
2098 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2101 /* Bring the PHY back to a known state. */
2104 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2106 /* Attach the MAC to the PHY. */
2107 phydev = phy_connect(tp->dev, phydev_name(phydev),
2108 tg3_adjust_link, phydev->interface);
2109 if (IS_ERR(phydev)) {
2110 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2111 return PTR_ERR(phydev);
2114 /* Mask with MAC supported features. */
2115 switch (phydev->interface) {
2116 case PHY_INTERFACE_MODE_GMII:
2117 case PHY_INTERFACE_MODE_RGMII:
2118 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2119 phy_set_max_speed(phydev, SPEED_1000);
2120 phy_support_asym_pause(phydev);
2124 case PHY_INTERFACE_MODE_MII:
2125 phy_set_max_speed(phydev, SPEED_100);
2126 phy_support_asym_pause(phydev);
2129 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2133 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2135 phy_attached_info(phydev);
2140 static void tg3_phy_start(struct tg3 *tp)
2142 struct phy_device *phydev;
2144 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2147 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2149 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2150 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2151 phydev->speed = tp->link_config.speed;
2152 phydev->duplex = tp->link_config.duplex;
2153 phydev->autoneg = tp->link_config.autoneg;
2154 ethtool_convert_legacy_u32_to_link_mode(
2155 phydev->advertising, tp->link_config.advertising);
2160 phy_start_aneg(phydev);
2163 static void tg3_phy_stop(struct tg3 *tp)
2165 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2168 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2171 static void tg3_phy_fini(struct tg3 *tp)
2173 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2174 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2175 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2179 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2184 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2187 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2188 /* Cannot do read-modify-write on 5401 */
2189 err = tg3_phy_auxctl_write(tp,
2190 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2191 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2196 err = tg3_phy_auxctl_read(tp,
2197 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2201 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2202 err = tg3_phy_auxctl_write(tp,
2203 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2209 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2213 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2216 tg3_writephy(tp, MII_TG3_FET_TEST,
2217 phytest | MII_TG3_FET_SHADOW_EN);
2218 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2220 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2222 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2223 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2225 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2229 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2233 if (!tg3_flag(tp, 5705_PLUS) ||
2234 (tg3_flag(tp, 5717_PLUS) &&
2235 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2238 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2239 tg3_phy_fet_toggle_apd(tp, enable);
2243 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2244 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2245 MII_TG3_MISC_SHDW_SCR5_SDTL |
2246 MII_TG3_MISC_SHDW_SCR5_C125OE;
2247 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2248 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2250 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2253 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2255 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2257 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2260 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2264 if (!tg3_flag(tp, 5705_PLUS) ||
2265 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2268 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2271 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2272 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2274 tg3_writephy(tp, MII_TG3_FET_TEST,
2275 ephy | MII_TG3_FET_SHADOW_EN);
2276 if (!tg3_readphy(tp, reg, &phy)) {
2278 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2280 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2281 tg3_writephy(tp, reg, phy);
2283 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2288 ret = tg3_phy_auxctl_read(tp,
2289 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2292 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2294 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2295 tg3_phy_auxctl_write(tp,
2296 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2301 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2306 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2309 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2311 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2312 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2315 static void tg3_phy_apply_otp(struct tg3 *tp)
2324 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2327 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2328 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2329 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2331 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2332 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2333 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2335 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2336 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2337 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2339 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2340 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2342 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2343 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2345 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2346 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2347 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2349 tg3_phy_toggle_auxctl_smdsp(tp, false);
2352 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2355 struct ethtool_eee *dest = &tp->eee;
2357 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2366 /* Pull eee_active */
2367 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2368 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2369 dest->eee_active = 1;
2371 dest->eee_active = 0;
2373 /* Pull lp advertised settings */
2374 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2376 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2378 /* Pull advertised and eee_enabled settings */
2379 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2381 dest->eee_enabled = !!val;
2382 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2384 /* Pull tx_lpi_enabled */
2385 val = tr32(TG3_CPMU_EEE_MODE);
2386 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2388 /* Pull lpi timer value */
2389 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2392 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2396 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2401 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2403 tp->link_config.active_duplex == DUPLEX_FULL &&
2404 (tp->link_config.active_speed == SPEED_100 ||
2405 tp->link_config.active_speed == SPEED_1000)) {
2408 if (tp->link_config.active_speed == SPEED_1000)
2409 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2411 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2413 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2415 tg3_eee_pull_config(tp, NULL);
2416 if (tp->eee.eee_active)
2420 if (!tp->setlpicnt) {
2421 if (current_link_up &&
2422 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2423 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2424 tg3_phy_toggle_auxctl_smdsp(tp, false);
2427 val = tr32(TG3_CPMU_EEE_MODE);
2428 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2432 static void tg3_phy_eee_enable(struct tg3 *tp)
2436 if (tp->link_config.active_speed == SPEED_1000 &&
2437 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2438 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2439 tg3_flag(tp, 57765_CLASS)) &&
2440 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2441 val = MII_TG3_DSP_TAP26_ALNOKO |
2442 MII_TG3_DSP_TAP26_RMRXSTO;
2443 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2444 tg3_phy_toggle_auxctl_smdsp(tp, false);
2447 val = tr32(TG3_CPMU_EEE_MODE);
2448 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2451 static int tg3_wait_macro_done(struct tg3 *tp)
2458 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2459 if ((tmp32 & 0x1000) == 0)
2469 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2471 static const u32 test_pat[4][6] = {
2472 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2473 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2474 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2475 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2479 for (chan = 0; chan < 4; chan++) {
2482 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2483 (chan * 0x2000) | 0x0200);
2484 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2486 for (i = 0; i < 6; i++)
2487 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2490 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2491 if (tg3_wait_macro_done(tp)) {
2496 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2497 (chan * 0x2000) | 0x0200);
2498 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2499 if (tg3_wait_macro_done(tp)) {
2504 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2505 if (tg3_wait_macro_done(tp)) {
2510 for (i = 0; i < 6; i += 2) {
2513 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2514 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2515 tg3_wait_macro_done(tp)) {
2521 if (low != test_pat[chan][i] ||
2522 high != test_pat[chan][i+1]) {
2523 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2524 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2525 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2535 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2539 for (chan = 0; chan < 4; chan++) {
2542 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2543 (chan * 0x2000) | 0x0200);
2544 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2545 for (i = 0; i < 6; i++)
2546 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2547 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2548 if (tg3_wait_macro_done(tp))
2555 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2557 u32 reg32, phy9_orig;
2558 int retries, do_phy_reset, err;
2564 err = tg3_bmcr_reset(tp);
2570 /* Disable transmitter and interrupt. */
2571 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2575 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2577 /* Set full-duplex, 1000 mbps. */
2578 tg3_writephy(tp, MII_BMCR,
2579 BMCR_FULLDPLX | BMCR_SPEED1000);
2581 /* Set to master mode. */
2582 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2585 tg3_writephy(tp, MII_CTRL1000,
2586 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2588 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2592 /* Block the PHY control access. */
2593 tg3_phydsp_write(tp, 0x8005, 0x0800);
2595 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2598 } while (--retries);
2600 err = tg3_phy_reset_chanpat(tp);
2604 tg3_phydsp_write(tp, 0x8005, 0x0000);
2606 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2607 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2609 tg3_phy_toggle_auxctl_smdsp(tp, false);
2611 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2613 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
2618 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2623 static void tg3_carrier_off(struct tg3 *tp)
2625 netif_carrier_off(tp->dev);
2626 tp->link_up = false;
2629 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2631 if (tg3_flag(tp, ENABLE_ASF))
2632 netdev_warn(tp->dev,
2633 "Management side-band traffic will be interrupted during phy settings change\n");
2636 /* This will reset the tigon3 PHY if there is no valid
2637 * link unless the FORCE argument is non-zero.
2639 static int tg3_phy_reset(struct tg3 *tp)
2644 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2645 val = tr32(GRC_MISC_CFG);
2646 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2649 err = tg3_readphy(tp, MII_BMSR, &val);
2650 err |= tg3_readphy(tp, MII_BMSR, &val);
2654 if (netif_running(tp->dev) && tp->link_up) {
2655 netif_carrier_off(tp->dev);
2656 tg3_link_report(tp);
2659 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2660 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2661 tg3_asic_rev(tp) == ASIC_REV_5705) {
2662 err = tg3_phy_reset_5703_4_5(tp);
2669 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2670 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2671 cpmuctrl = tr32(TG3_CPMU_CTRL);
2672 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2674 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2677 err = tg3_bmcr_reset(tp);
2681 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2682 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2683 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2685 tw32(TG3_CPMU_CTRL, cpmuctrl);
2688 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2689 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2690 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2691 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2692 CPMU_LSPD_1000MB_MACCLK_12_5) {
2693 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2695 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2699 if (tg3_flag(tp, 5717_PLUS) &&
2700 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2703 tg3_phy_apply_otp(tp);
2705 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2706 tg3_phy_toggle_apd(tp, true);
2708 tg3_phy_toggle_apd(tp, false);
2711 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2712 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2714 tg3_phydsp_write(tp, 0x000a, 0x0323);
2715 tg3_phy_toggle_auxctl_smdsp(tp, false);
2718 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2719 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2720 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2723 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2724 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2725 tg3_phydsp_write(tp, 0x000a, 0x310b);
2726 tg3_phydsp_write(tp, 0x201f, 0x9506);
2727 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2728 tg3_phy_toggle_auxctl_smdsp(tp, false);
2730 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2731 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2732 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2733 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2734 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2735 tg3_writephy(tp, MII_TG3_TEST1,
2736 MII_TG3_TEST1_TRIM_EN | 0x4);
2738 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2740 tg3_phy_toggle_auxctl_smdsp(tp, false);
2744 /* Set Extended packet length bit (bit 14) on all chips that */
2745 /* support jumbo frames */
2746 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2747 /* Cannot do read-modify-write on 5401 */
2748 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2749 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2750 /* Set bit 14 with read-modify-write to preserve other bits */
2751 err = tg3_phy_auxctl_read(tp,
2752 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2754 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2755 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2758 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2759 * jumbo frames transmission.
2761 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2762 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2763 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2764 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2767 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2768 /* adjust output voltage */
2769 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2772 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2773 tg3_phydsp_write(tp, 0xffb, 0x4000);
2775 tg3_phy_toggle_automdix(tp, true);
2776 tg3_phy_set_wirespeed(tp);
2780 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2781 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2782 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2783 TG3_GPIO_MSG_NEED_VAUX)
2784 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2785 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2786 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2787 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2788 (TG3_GPIO_MSG_DRVR_PRES << 12))
2790 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2791 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2792 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2793 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2794 (TG3_GPIO_MSG_NEED_VAUX << 12))
2796 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2800 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 tg3_asic_rev(tp) == ASIC_REV_5719)
2802 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2804 status = tr32(TG3_CPMU_DRV_STATUS);
2806 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2807 status &= ~(TG3_GPIO_MSG_MASK << shift);
2808 status |= (newstat << shift);
2810 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2811 tg3_asic_rev(tp) == ASIC_REV_5719)
2812 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2814 tw32(TG3_CPMU_DRV_STATUS, status);
2816 return status >> TG3_APE_GPIO_MSG_SHIFT;
2819 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2821 if (!tg3_flag(tp, IS_NIC))
2824 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2825 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2826 tg3_asic_rev(tp) == ASIC_REV_5720) {
2827 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2830 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2832 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2835 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2837 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2838 TG3_GRC_LCLCTL_PWRSW_DELAY);
2844 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2848 if (!tg3_flag(tp, IS_NIC) ||
2849 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2850 tg3_asic_rev(tp) == ASIC_REV_5701)
2853 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2855 tw32_wait_f(GRC_LOCAL_CTRL,
2856 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2857 TG3_GRC_LCLCTL_PWRSW_DELAY);
2859 tw32_wait_f(GRC_LOCAL_CTRL,
2861 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 tw32_wait_f(GRC_LOCAL_CTRL,
2864 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2865 TG3_GRC_LCLCTL_PWRSW_DELAY);
2868 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2870 if (!tg3_flag(tp, IS_NIC))
2873 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2874 tg3_asic_rev(tp) == ASIC_REV_5701) {
2875 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2876 (GRC_LCLCTRL_GPIO_OE0 |
2877 GRC_LCLCTRL_GPIO_OE1 |
2878 GRC_LCLCTRL_GPIO_OE2 |
2879 GRC_LCLCTRL_GPIO_OUTPUT0 |
2880 GRC_LCLCTRL_GPIO_OUTPUT1),
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2882 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2883 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2884 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2885 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2886 GRC_LCLCTRL_GPIO_OE1 |
2887 GRC_LCLCTRL_GPIO_OE2 |
2888 GRC_LCLCTRL_GPIO_OUTPUT0 |
2889 GRC_LCLCTRL_GPIO_OUTPUT1 |
2891 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2892 TG3_GRC_LCLCTL_PWRSW_DELAY);
2894 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2895 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2896 TG3_GRC_LCLCTL_PWRSW_DELAY);
2898 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2899 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2900 TG3_GRC_LCLCTL_PWRSW_DELAY);
2903 u32 grc_local_ctrl = 0;
2905 /* Workaround to prevent overdrawing Amps. */
2906 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2907 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2908 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2910 TG3_GRC_LCLCTL_PWRSW_DELAY);
2913 /* On 5753 and variants, GPIO2 cannot be used. */
2914 no_gpio2 = tp->nic_sram_data_cfg &
2915 NIC_SRAM_DATA_CFG_NO_GPIO2;
2917 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2918 GRC_LCLCTRL_GPIO_OE1 |
2919 GRC_LCLCTRL_GPIO_OE2 |
2920 GRC_LCLCTRL_GPIO_OUTPUT1 |
2921 GRC_LCLCTRL_GPIO_OUTPUT2;
2923 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2924 GRC_LCLCTRL_GPIO_OUTPUT2);
2926 tw32_wait_f(GRC_LOCAL_CTRL,
2927 tp->grc_local_ctrl | grc_local_ctrl,
2928 TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2932 tw32_wait_f(GRC_LOCAL_CTRL,
2933 tp->grc_local_ctrl | grc_local_ctrl,
2934 TG3_GRC_LCLCTL_PWRSW_DELAY);
2937 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2938 tw32_wait_f(GRC_LOCAL_CTRL,
2939 tp->grc_local_ctrl | grc_local_ctrl,
2940 TG3_GRC_LCLCTL_PWRSW_DELAY);
2945 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2949 /* Serialize power state transitions */
2950 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2953 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2954 msg = TG3_GPIO_MSG_NEED_VAUX;
2956 msg = tg3_set_function_status(tp, msg);
2958 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2961 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2962 tg3_pwrsrc_switch_to_vaux(tp);
2964 tg3_pwrsrc_die_with_vmain(tp);
2967 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2970 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2972 bool need_vaux = false;
2974 /* The GPIOs do something completely different on 57765. */
2975 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2978 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2979 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2980 tg3_asic_rev(tp) == ASIC_REV_5720) {
2981 tg3_frob_aux_power_5717(tp, include_wol ?
2982 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2986 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2987 struct net_device *dev_peer;
2989 dev_peer = pci_get_drvdata(tp->pdev_peer);
2991 /* remove_one() may have been run on the peer. */
2993 struct tg3 *tp_peer = netdev_priv(dev_peer);
2995 if (tg3_flag(tp_peer, INIT_COMPLETE))
2998 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2999 tg3_flag(tp_peer, ENABLE_ASF))
3004 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3005 tg3_flag(tp, ENABLE_ASF))
3009 tg3_pwrsrc_switch_to_vaux(tp);
3011 tg3_pwrsrc_die_with_vmain(tp);
3014 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3016 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3018 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3019 if (speed != SPEED_10)
3021 } else if (speed == SPEED_10)
3027 static bool tg3_phy_power_bug(struct tg3 *tp)
3029 switch (tg3_asic_rev(tp)) {
3034 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3043 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3052 static bool tg3_phy_led_bug(struct tg3 *tp)
3054 switch (tg3_asic_rev(tp)) {
3057 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3066 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3070 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3073 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3074 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3075 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3076 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3079 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3080 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3081 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3086 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3088 val = tr32(GRC_MISC_CFG);
3089 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3092 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3094 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3097 tg3_writephy(tp, MII_ADVERTISE, 0);
3098 tg3_writephy(tp, MII_BMCR,
3099 BMCR_ANENABLE | BMCR_ANRESTART);
3101 tg3_writephy(tp, MII_TG3_FET_TEST,
3102 phytest | MII_TG3_FET_SHADOW_EN);
3103 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3104 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3106 MII_TG3_FET_SHDW_AUXMODE4,
3109 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3112 } else if (do_low_power) {
3113 if (!tg3_phy_led_bug(tp))
3114 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3115 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3117 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3118 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3119 MII_TG3_AUXCTL_PCTL_VREG_11V;
3120 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3123 /* The PHY should not be powered down on some chips because
3126 if (tg3_phy_power_bug(tp))
3129 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3130 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3131 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3132 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3133 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3134 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3137 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3140 /* tp->lock is held. */
3141 static int tg3_nvram_lock(struct tg3 *tp)
3143 if (tg3_flag(tp, NVRAM)) {
3146 if (tp->nvram_lock_cnt == 0) {
3147 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3148 for (i = 0; i < 8000; i++) {
3149 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3154 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3158 tp->nvram_lock_cnt++;
3163 /* tp->lock is held. */
3164 static void tg3_nvram_unlock(struct tg3 *tp)
3166 if (tg3_flag(tp, NVRAM)) {
3167 if (tp->nvram_lock_cnt > 0)
3168 tp->nvram_lock_cnt--;
3169 if (tp->nvram_lock_cnt == 0)
3170 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3174 /* tp->lock is held. */
3175 static void tg3_enable_nvram_access(struct tg3 *tp)
3177 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178 u32 nvaccess = tr32(NVRAM_ACCESS);
3180 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3184 /* tp->lock is held. */
3185 static void tg3_disable_nvram_access(struct tg3 *tp)
3187 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3188 u32 nvaccess = tr32(NVRAM_ACCESS);
3190 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3194 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3195 u32 offset, u32 *val)
3200 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3203 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3204 EEPROM_ADDR_DEVID_MASK |
3206 tw32(GRC_EEPROM_ADDR,
3208 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3209 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3210 EEPROM_ADDR_ADDR_MASK) |
3211 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3213 for (i = 0; i < 1000; i++) {
3214 tmp = tr32(GRC_EEPROM_ADDR);
3216 if (tmp & EEPROM_ADDR_COMPLETE)
3220 if (!(tmp & EEPROM_ADDR_COMPLETE))
3223 tmp = tr32(GRC_EEPROM_DATA);
3226 * The data will always be opposite the native endian
3227 * format. Perform a blind byteswap to compensate.
3234 #define NVRAM_CMD_TIMEOUT 10000
3236 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3240 tw32(NVRAM_CMD, nvram_cmd);
3241 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3242 usleep_range(10, 40);
3243 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3249 if (i == NVRAM_CMD_TIMEOUT)
3255 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3257 if (tg3_flag(tp, NVRAM) &&
3258 tg3_flag(tp, NVRAM_BUFFERED) &&
3259 tg3_flag(tp, FLASH) &&
3260 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3261 (tp->nvram_jedecnum == JEDEC_ATMEL))
3263 addr = ((addr / tp->nvram_pagesize) <<
3264 ATMEL_AT45DB0X1B_PAGE_POS) +
3265 (addr % tp->nvram_pagesize);
3270 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3272 if (tg3_flag(tp, NVRAM) &&
3273 tg3_flag(tp, NVRAM_BUFFERED) &&
3274 tg3_flag(tp, FLASH) &&
3275 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3276 (tp->nvram_jedecnum == JEDEC_ATMEL))
3278 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3279 tp->nvram_pagesize) +
3280 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3285 /* NOTE: Data read in from NVRAM is byteswapped according to
3286 * the byteswapping settings for all other register accesses.
3287 * tg3 devices are BE devices, so on a BE machine, the data
3288 * returned will be exactly as it is seen in NVRAM. On a LE
3289 * machine, the 32-bit value will be byteswapped.
3291 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3295 if (!tg3_flag(tp, NVRAM))
3296 return tg3_nvram_read_using_eeprom(tp, offset, val);
3298 offset = tg3_nvram_phys_addr(tp, offset);
3300 if (offset > NVRAM_ADDR_MSK)
3303 ret = tg3_nvram_lock(tp);
3307 tg3_enable_nvram_access(tp);
3309 tw32(NVRAM_ADDR, offset);
3310 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3311 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3314 *val = tr32(NVRAM_RDDATA);
3316 tg3_disable_nvram_access(tp);
3318 tg3_nvram_unlock(tp);
3323 /* Ensures NVRAM data is in bytestream format. */
3324 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3327 int res = tg3_nvram_read(tp, offset, &v);
3329 *val = cpu_to_be32(v);
3333 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3334 u32 offset, u32 len, u8 *buf)
3339 for (i = 0; i < len; i += 4) {
3345 memcpy(&data, buf + i, 4);
3348 * The SEEPROM interface expects the data to always be opposite
3349 * the native endian format. We accomplish this by reversing
3350 * all the operations that would have been performed on the
3351 * data from a call to tg3_nvram_read_be32().
3353 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3355 val = tr32(GRC_EEPROM_ADDR);
3356 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3358 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3360 tw32(GRC_EEPROM_ADDR, val |
3361 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3362 (addr & EEPROM_ADDR_ADDR_MASK) |
3366 for (j = 0; j < 1000; j++) {
3367 val = tr32(GRC_EEPROM_ADDR);
3369 if (val & EEPROM_ADDR_COMPLETE)
3373 if (!(val & EEPROM_ADDR_COMPLETE)) {
3382 /* offset and length are dword aligned */
3383 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3387 u32 pagesize = tp->nvram_pagesize;
3388 u32 pagemask = pagesize - 1;
3392 tmp = kmalloc(pagesize, GFP_KERNEL);
3398 u32 phy_addr, page_off, size;
3400 phy_addr = offset & ~pagemask;
3402 for (j = 0; j < pagesize; j += 4) {
3403 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3404 (__be32 *) (tmp + j));
3411 page_off = offset & pagemask;
3418 memcpy(tmp + page_off, buf, size);
3420 offset = offset + (pagesize - page_off);
3422 tg3_enable_nvram_access(tp);
3425 * Before we can erase the flash page, we need
3426 * to issue a special "write enable" command.
3428 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3430 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3433 /* Erase the target page */
3434 tw32(NVRAM_ADDR, phy_addr);
3436 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3437 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3439 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3442 /* Issue another write enable to start the write. */
3443 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3445 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3448 for (j = 0; j < pagesize; j += 4) {
3451 data = *((__be32 *) (tmp + j));
3453 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3455 tw32(NVRAM_ADDR, phy_addr + j);
3457 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3461 nvram_cmd |= NVRAM_CMD_FIRST;
3462 else if (j == (pagesize - 4))
3463 nvram_cmd |= NVRAM_CMD_LAST;
3465 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3473 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3474 tg3_nvram_exec_cmd(tp, nvram_cmd);
3481 /* offset and length are dword aligned */
3482 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3487 for (i = 0; i < len; i += 4, offset += 4) {
3488 u32 page_off, phy_addr, nvram_cmd;
3491 memcpy(&data, buf + i, 4);
3492 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3494 page_off = offset % tp->nvram_pagesize;
3496 phy_addr = tg3_nvram_phys_addr(tp, offset);
3498 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3500 if (page_off == 0 || i == 0)
3501 nvram_cmd |= NVRAM_CMD_FIRST;
3502 if (page_off == (tp->nvram_pagesize - 4))
3503 nvram_cmd |= NVRAM_CMD_LAST;
3506 nvram_cmd |= NVRAM_CMD_LAST;
3508 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3509 !tg3_flag(tp, FLASH) ||
3510 !tg3_flag(tp, 57765_PLUS))
3511 tw32(NVRAM_ADDR, phy_addr);
3513 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3514 !tg3_flag(tp, 5755_PLUS) &&
3515 (tp->nvram_jedecnum == JEDEC_ST) &&
3516 (nvram_cmd & NVRAM_CMD_FIRST)) {
3519 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3520 ret = tg3_nvram_exec_cmd(tp, cmd);
3524 if (!tg3_flag(tp, FLASH)) {
3525 /* We always do complete word writes to eeprom. */
3526 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3529 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3536 /* offset and length are dword aligned */
3537 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3541 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3542 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3543 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3547 if (!tg3_flag(tp, NVRAM)) {
3548 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3552 ret = tg3_nvram_lock(tp);
3556 tg3_enable_nvram_access(tp);
3557 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3558 tw32(NVRAM_WRITE1, 0x406);
3560 grc_mode = tr32(GRC_MODE);
3561 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3563 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3564 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3567 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3571 grc_mode = tr32(GRC_MODE);
3572 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3574 tg3_disable_nvram_access(tp);
3575 tg3_nvram_unlock(tp);
3578 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3579 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3586 #define RX_CPU_SCRATCH_BASE 0x30000
3587 #define RX_CPU_SCRATCH_SIZE 0x04000
3588 #define TX_CPU_SCRATCH_BASE 0x34000
3589 #define TX_CPU_SCRATCH_SIZE 0x04000
3591 /* tp->lock is held. */
3592 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3595 const int iters = 10000;
3597 for (i = 0; i < iters; i++) {
3598 tw32(cpu_base + CPU_STATE, 0xffffffff);
3599 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3600 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3602 if (pci_channel_offline(tp->pdev))
3606 return (i == iters) ? -EBUSY : 0;
3609 /* tp->lock is held. */
3610 static int tg3_rxcpu_pause(struct tg3 *tp)
3612 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3614 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3615 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3621 /* tp->lock is held. */
3622 static int tg3_txcpu_pause(struct tg3 *tp)
3624 return tg3_pause_cpu(tp, TX_CPU_BASE);
3627 /* tp->lock is held. */
3628 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3630 tw32(cpu_base + CPU_STATE, 0xffffffff);
3631 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3634 /* tp->lock is held. */
3635 static void tg3_rxcpu_resume(struct tg3 *tp)
3637 tg3_resume_cpu(tp, RX_CPU_BASE);
3640 /* tp->lock is held. */
3641 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3645 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3647 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3648 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3650 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3653 if (cpu_base == RX_CPU_BASE) {
3654 rc = tg3_rxcpu_pause(tp);
3657 * There is only an Rx CPU for the 5750 derivative in the
3660 if (tg3_flag(tp, IS_SSB_CORE))
3663 rc = tg3_txcpu_pause(tp);
3667 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3668 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3672 /* Clear firmware's nvram arbitration. */
3673 if (tg3_flag(tp, NVRAM))
3674 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3678 static int tg3_fw_data_len(struct tg3 *tp,
3679 const struct tg3_firmware_hdr *fw_hdr)
3683 /* Non fragmented firmware have one firmware header followed by a
3684 * contiguous chunk of data to be written. The length field in that
3685 * header is not the length of data to be written but the complete
3686 * length of the bss. The data length is determined based on
3687 * tp->fw->size minus headers.
3689 * Fragmented firmware have a main header followed by multiple
3690 * fragments. Each fragment is identical to non fragmented firmware
3691 * with a firmware header followed by a contiguous chunk of data. In
3692 * the main header, the length field is unused and set to 0xffffffff.
3693 * In each fragment header the length is the entire size of that
3694 * fragment i.e. fragment data + header length. Data length is
3695 * therefore length field in the header minus TG3_FW_HDR_LEN.
3697 if (tp->fw_len == 0xffffffff)
3698 fw_len = be32_to_cpu(fw_hdr->len);
3700 fw_len = tp->fw->size;
3702 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3705 /* tp->lock is held. */
3706 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3707 u32 cpu_scratch_base, int cpu_scratch_size,
3708 const struct tg3_firmware_hdr *fw_hdr)
3711 void (*write_op)(struct tg3 *, u32, u32);
3712 int total_len = tp->fw->size;
3714 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3716 "%s: Trying to load TX cpu firmware which is 5705\n",
3721 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3722 write_op = tg3_write_mem;
3724 write_op = tg3_write_indirect_reg32;
3726 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3727 /* It is possible that bootcode is still loading at this point.
3728 * Get the nvram lock first before halting the cpu.
3730 int lock_err = tg3_nvram_lock(tp);
3731 err = tg3_halt_cpu(tp, cpu_base);
3733 tg3_nvram_unlock(tp);
3737 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3738 write_op(tp, cpu_scratch_base + i, 0);
3739 tw32(cpu_base + CPU_STATE, 0xffffffff);
3740 tw32(cpu_base + CPU_MODE,
3741 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3743 /* Subtract additional main header for fragmented firmware and
3744 * advance to the first fragment
3746 total_len -= TG3_FW_HDR_LEN;
3751 u32 *fw_data = (u32 *)(fw_hdr + 1);
3752 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3753 write_op(tp, cpu_scratch_base +
3754 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3756 be32_to_cpu(fw_data[i]));
3758 total_len -= be32_to_cpu(fw_hdr->len);
3760 /* Advance to next fragment */
3761 fw_hdr = (struct tg3_firmware_hdr *)
3762 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3763 } while (total_len > 0);
3771 /* tp->lock is held. */
3772 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3775 const int iters = 5;
3777 tw32(cpu_base + CPU_STATE, 0xffffffff);
3778 tw32_f(cpu_base + CPU_PC, pc);
3780 for (i = 0; i < iters; i++) {
3781 if (tr32(cpu_base + CPU_PC) == pc)
3783 tw32(cpu_base + CPU_STATE, 0xffffffff);
3784 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3785 tw32_f(cpu_base + CPU_PC, pc);
3789 return (i == iters) ? -EBUSY : 0;
3792 /* tp->lock is held. */
3793 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3795 const struct tg3_firmware_hdr *fw_hdr;
3798 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3800 /* Firmware blob starts with version numbers, followed by
3801 start address and length. We are setting complete length.
3802 length = end_address_of_bss - start_address_of_text.
3803 Remainder is the blob to be loaded contiguously
3804 from start address. */
3806 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3807 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3812 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3813 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3818 /* Now startup only the RX cpu. */
3819 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3820 be32_to_cpu(fw_hdr->base_addr));
3822 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3823 "should be %08x\n", __func__,
3824 tr32(RX_CPU_BASE + CPU_PC),
3825 be32_to_cpu(fw_hdr->base_addr));
3829 tg3_rxcpu_resume(tp);
3834 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3836 const int iters = 1000;
3840 /* Wait for boot code to complete initialization and enter service
3841 * loop. It is then safe to download service patches
3843 for (i = 0; i < iters; i++) {
3844 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3851 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3855 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3857 netdev_warn(tp->dev,
3858 "Other patches exist. Not downloading EEE patch\n");
3865 /* tp->lock is held. */
3866 static void tg3_load_57766_firmware(struct tg3 *tp)
3868 struct tg3_firmware_hdr *fw_hdr;
3870 if (!tg3_flag(tp, NO_NVRAM))
3873 if (tg3_validate_rxcpu_state(tp))
3879 /* This firmware blob has a different format than older firmware
3880 * releases as given below. The main difference is we have fragmented
3881 * data to be written to non-contiguous locations.
3883 * In the beginning we have a firmware header identical to other
3884 * firmware which consists of version, base addr and length. The length
3885 * here is unused and set to 0xffffffff.
3887 * This is followed by a series of firmware fragments which are
3888 * individually identical to previous firmware. i.e. they have the
3889 * firmware header and followed by data for that fragment. The version
3890 * field of the individual fragment header is unused.
3893 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3894 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3897 if (tg3_rxcpu_pause(tp))
3900 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3901 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3903 tg3_rxcpu_resume(tp);
3906 /* tp->lock is held. */
3907 static int tg3_load_tso_firmware(struct tg3 *tp)
3909 const struct tg3_firmware_hdr *fw_hdr;
3910 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3913 if (!tg3_flag(tp, FW_TSO))
3916 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3918 /* Firmware blob starts with version numbers, followed by
3919 start address and length. We are setting complete length.
3920 length = end_address_of_bss - start_address_of_text.
3921 Remainder is the blob to be loaded contiguously
3922 from start address. */
3924 cpu_scratch_size = tp->fw_len;
3926 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3927 cpu_base = RX_CPU_BASE;
3928 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3930 cpu_base = TX_CPU_BASE;
3931 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3932 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3935 err = tg3_load_firmware_cpu(tp, cpu_base,
3936 cpu_scratch_base, cpu_scratch_size,
3941 /* Now startup the cpu. */
3942 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3943 be32_to_cpu(fw_hdr->base_addr));
3946 "%s fails to set CPU PC, is %08x should be %08x\n",
3947 __func__, tr32(cpu_base + CPU_PC),
3948 be32_to_cpu(fw_hdr->base_addr));
3952 tg3_resume_cpu(tp, cpu_base);
3956 /* tp->lock is held. */
3957 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3959 u32 addr_high, addr_low;
3961 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3962 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3963 (mac_addr[4] << 8) | mac_addr[5]);
3966 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3967 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3970 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3971 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3975 /* tp->lock is held. */
3976 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3981 for (i = 0; i < 4; i++) {
3982 if (i == 1 && skip_mac_1)
3984 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3987 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3988 tg3_asic_rev(tp) == ASIC_REV_5704) {
3989 for (i = 4; i < 16; i++)
3990 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3993 addr_high = (tp->dev->dev_addr[0] +
3994 tp->dev->dev_addr[1] +
3995 tp->dev->dev_addr[2] +
3996 tp->dev->dev_addr[3] +
3997 tp->dev->dev_addr[4] +
3998 tp->dev->dev_addr[5]) &
3999 TX_BACKOFF_SEED_MASK;
4000 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4003 static void tg3_enable_register_access(struct tg3 *tp)
4006 * Make sure register accesses (indirect or otherwise) will function
4009 pci_write_config_dword(tp->pdev,
4010 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4013 static int tg3_power_up(struct tg3 *tp)
4017 tg3_enable_register_access(tp);
4019 err = pci_set_power_state(tp->pdev, PCI_D0);
4021 /* Switch out of Vaux if it is a NIC */
4022 tg3_pwrsrc_switch_to_vmain(tp);
4024 netdev_err(tp->dev, "Transition to D0 failed\n");
4030 static int tg3_setup_phy(struct tg3 *, bool);
4032 static int tg3_power_down_prepare(struct tg3 *tp)
4035 bool device_should_wake, do_low_power;
4037 tg3_enable_register_access(tp);
4039 /* Restore the CLKREQ setting. */
4040 if (tg3_flag(tp, CLKREQ_BUG))
4041 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4042 PCI_EXP_LNKCTL_CLKREQ_EN);
4044 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4045 tw32(TG3PCI_MISC_HOST_CTRL,
4046 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4048 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4049 tg3_flag(tp, WOL_ENABLE);
4051 if (tg3_flag(tp, USE_PHYLIB)) {
4052 do_low_power = false;
4053 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4054 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4055 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4056 struct phy_device *phydev;
4059 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4061 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4063 tp->link_config.speed = phydev->speed;
4064 tp->link_config.duplex = phydev->duplex;
4065 tp->link_config.autoneg = phydev->autoneg;
4066 ethtool_convert_link_mode_to_legacy_u32(
4067 &tp->link_config.advertising,
4068 phydev->advertising);
4070 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4071 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4073 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4075 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4078 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4079 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4080 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4082 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4084 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4087 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4092 linkmode_copy(phydev->advertising, advertising);
4093 phy_start_aneg(phydev);
4095 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4096 if (phyid != PHY_ID_BCMAC131) {
4097 phyid &= PHY_BCM_OUI_MASK;
4098 if (phyid == PHY_BCM_OUI_1 ||
4099 phyid == PHY_BCM_OUI_2 ||
4100 phyid == PHY_BCM_OUI_3)
4101 do_low_power = true;
4105 do_low_power = true;
4107 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4108 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4110 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4111 tg3_setup_phy(tp, false);
4114 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4117 val = tr32(GRC_VCPU_EXT_CTRL);
4118 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4119 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4123 for (i = 0; i < 200; i++) {
4124 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4125 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4130 if (tg3_flag(tp, WOL_CAP))
4131 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4132 WOL_DRV_STATE_SHUTDOWN |
4136 if (device_should_wake) {
4139 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4141 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4142 tg3_phy_auxctl_write(tp,
4143 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4144 MII_TG3_AUXCTL_PCTL_WOL_EN |
4145 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4146 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4150 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4151 mac_mode = MAC_MODE_PORT_MODE_GMII;
4152 else if (tp->phy_flags &
4153 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4154 if (tp->link_config.active_speed == SPEED_1000)
4155 mac_mode = MAC_MODE_PORT_MODE_GMII;
4157 mac_mode = MAC_MODE_PORT_MODE_MII;
4159 mac_mode = MAC_MODE_PORT_MODE_MII;
4161 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4162 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4163 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4164 SPEED_100 : SPEED_10;
4165 if (tg3_5700_link_polarity(tp, speed))
4166 mac_mode |= MAC_MODE_LINK_POLARITY;
4168 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4171 mac_mode = MAC_MODE_PORT_MODE_TBI;
4174 if (!tg3_flag(tp, 5750_PLUS))
4175 tw32(MAC_LED_CTRL, tp->led_ctrl);
4177 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4178 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4179 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4180 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4182 if (tg3_flag(tp, ENABLE_APE))
4183 mac_mode |= MAC_MODE_APE_TX_EN |
4184 MAC_MODE_APE_RX_EN |
4185 MAC_MODE_TDE_ENABLE;
4187 tw32_f(MAC_MODE, mac_mode);
4190 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4194 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4195 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4196 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4199 base_val = tp->pci_clock_ctrl;
4200 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4201 CLOCK_CTRL_TXCLK_DISABLE);
4203 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4204 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4205 } else if (tg3_flag(tp, 5780_CLASS) ||
4206 tg3_flag(tp, CPMU_PRESENT) ||
4207 tg3_asic_rev(tp) == ASIC_REV_5906) {
4209 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4210 u32 newbits1, newbits2;
4212 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213 tg3_asic_rev(tp) == ASIC_REV_5701) {
4214 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4215 CLOCK_CTRL_TXCLK_DISABLE |
4217 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4218 } else if (tg3_flag(tp, 5705_PLUS)) {
4219 newbits1 = CLOCK_CTRL_625_CORE;
4220 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4222 newbits1 = CLOCK_CTRL_ALTCLK;
4223 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4226 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4229 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4232 if (!tg3_flag(tp, 5705_PLUS)) {
4235 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4236 tg3_asic_rev(tp) == ASIC_REV_5701) {
4237 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4238 CLOCK_CTRL_TXCLK_DISABLE |
4239 CLOCK_CTRL_44MHZ_CORE);
4241 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4244 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4245 tp->pci_clock_ctrl | newbits3, 40);
4249 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4250 tg3_power_down_phy(tp, do_low_power);
4252 tg3_frob_aux_power(tp, true);
4254 /* Workaround for unstable PLL clock */
4255 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4256 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4257 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4258 u32 val = tr32(0x7d00);
4260 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4262 if (!tg3_flag(tp, ENABLE_ASF)) {
4265 err = tg3_nvram_lock(tp);
4266 tg3_halt_cpu(tp, RX_CPU_BASE);
4268 tg3_nvram_unlock(tp);
4272 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4274 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4279 static void tg3_power_down(struct tg3 *tp)
4281 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4282 pci_set_power_state(tp->pdev, PCI_D3hot);
4285 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4287 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4288 case MII_TG3_AUX_STAT_10HALF:
4290 *duplex = DUPLEX_HALF;
4293 case MII_TG3_AUX_STAT_10FULL:
4295 *duplex = DUPLEX_FULL;
4298 case MII_TG3_AUX_STAT_100HALF:
4300 *duplex = DUPLEX_HALF;
4303 case MII_TG3_AUX_STAT_100FULL:
4305 *duplex = DUPLEX_FULL;
4308 case MII_TG3_AUX_STAT_1000HALF:
4309 *speed = SPEED_1000;
4310 *duplex = DUPLEX_HALF;
4313 case MII_TG3_AUX_STAT_1000FULL:
4314 *speed = SPEED_1000;
4315 *duplex = DUPLEX_FULL;
4319 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4320 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4322 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4326 *speed = SPEED_UNKNOWN;
4327 *duplex = DUPLEX_UNKNOWN;
4332 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4337 new_adv = ADVERTISE_CSMA;
4338 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4339 new_adv |= mii_advertise_flowctrl(flowctrl);
4341 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4345 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4346 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4348 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4349 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4350 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4352 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4357 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4360 tw32(TG3_CPMU_EEE_MODE,
4361 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4363 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4368 /* Advertise 100-BaseTX EEE ability */
4369 if (advertise & ADVERTISED_100baseT_Full)
4370 val |= MDIO_AN_EEE_ADV_100TX;
4371 /* Advertise 1000-BaseT EEE ability */
4372 if (advertise & ADVERTISED_1000baseT_Full)
4373 val |= MDIO_AN_EEE_ADV_1000T;
4375 if (!tp->eee.eee_enabled) {
4377 tp->eee.advertised = 0;
4379 tp->eee.advertised = advertise &
4380 (ADVERTISED_100baseT_Full |
4381 ADVERTISED_1000baseT_Full);
4384 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4388 switch (tg3_asic_rev(tp)) {
4390 case ASIC_REV_57765:
4391 case ASIC_REV_57766:
4393 /* If we advertised any eee advertisements above... */
4395 val = MII_TG3_DSP_TAP26_ALNOKO |
4396 MII_TG3_DSP_TAP26_RMRXSTO |
4397 MII_TG3_DSP_TAP26_OPCSINPT;
4398 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4402 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4403 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4404 MII_TG3_DSP_CH34TP2_HIBW01);
4407 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4416 static void tg3_phy_copper_begin(struct tg3 *tp)
4418 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4419 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4422 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4423 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4424 adv = ADVERTISED_10baseT_Half |
4425 ADVERTISED_10baseT_Full;
4426 if (tg3_flag(tp, WOL_SPEED_100MB))
4427 adv |= ADVERTISED_100baseT_Half |
4428 ADVERTISED_100baseT_Full;
4429 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4430 if (!(tp->phy_flags &
4431 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4432 adv |= ADVERTISED_1000baseT_Half;
4433 adv |= ADVERTISED_1000baseT_Full;
4436 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4438 adv = tp->link_config.advertising;
4439 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4440 adv &= ~(ADVERTISED_1000baseT_Half |
4441 ADVERTISED_1000baseT_Full);
4443 fc = tp->link_config.flowctrl;
4446 tg3_phy_autoneg_cfg(tp, adv, fc);
4448 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4449 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4450 /* Normally during power down we want to autonegotiate
4451 * the lowest possible speed for WOL. However, to avoid
4452 * link flap, we leave it untouched.
4457 tg3_writephy(tp, MII_BMCR,
4458 BMCR_ANENABLE | BMCR_ANRESTART);
4461 u32 bmcr, orig_bmcr;
4463 tp->link_config.active_speed = tp->link_config.speed;
4464 tp->link_config.active_duplex = tp->link_config.duplex;
4466 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4467 /* With autoneg disabled, 5715 only links up when the
4468 * advertisement register has the configured speed
4471 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4475 switch (tp->link_config.speed) {
4481 bmcr |= BMCR_SPEED100;
4485 bmcr |= BMCR_SPEED1000;
4489 if (tp->link_config.duplex == DUPLEX_FULL)
4490 bmcr |= BMCR_FULLDPLX;
4492 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4493 (bmcr != orig_bmcr)) {
4494 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4495 for (i = 0; i < 1500; i++) {
4499 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4500 tg3_readphy(tp, MII_BMSR, &tmp))
4502 if (!(tmp & BMSR_LSTATUS)) {
4507 tg3_writephy(tp, MII_BMCR, bmcr);
4513 static int tg3_phy_pull_config(struct tg3 *tp)
4518 err = tg3_readphy(tp, MII_BMCR, &val);
4522 if (!(val & BMCR_ANENABLE)) {
4523 tp->link_config.autoneg = AUTONEG_DISABLE;
4524 tp->link_config.advertising = 0;
4525 tg3_flag_clear(tp, PAUSE_AUTONEG);
4529 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4531 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4534 tp->link_config.speed = SPEED_10;
4537 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4540 tp->link_config.speed = SPEED_100;
4542 case BMCR_SPEED1000:
4543 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4544 tp->link_config.speed = SPEED_1000;
4552 if (val & BMCR_FULLDPLX)
4553 tp->link_config.duplex = DUPLEX_FULL;
4555 tp->link_config.duplex = DUPLEX_HALF;
4557 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4563 tp->link_config.autoneg = AUTONEG_ENABLE;
4564 tp->link_config.advertising = ADVERTISED_Autoneg;
4565 tg3_flag_set(tp, PAUSE_AUTONEG);
4567 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4570 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4574 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4575 tp->link_config.advertising |= adv | ADVERTISED_TP;
4577 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4579 tp->link_config.advertising |= ADVERTISED_FIBRE;
4582 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4585 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4586 err = tg3_readphy(tp, MII_CTRL1000, &val);
4590 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4592 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4596 adv = tg3_decode_flowctrl_1000X(val);
4597 tp->link_config.flowctrl = adv;
4599 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4600 adv = mii_adv_to_ethtool_adv_x(val);
4603 tp->link_config.advertising |= adv;
4610 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4614 /* Turn off tap power management. */
4615 /* Set Extended packet length bit */
4616 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4618 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4619 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4620 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4621 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4622 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4629 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4631 struct ethtool_eee eee;
4633 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4636 tg3_eee_pull_config(tp, &eee);
4638 if (tp->eee.eee_enabled) {
4639 if (tp->eee.advertised != eee.advertised ||
4640 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4641 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4644 /* EEE is disabled but we're advertising */
4652 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4654 u32 advmsk, tgtadv, advertising;
4656 advertising = tp->link_config.advertising;
4657 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4659 advmsk = ADVERTISE_ALL;
4660 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4661 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4662 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4665 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4668 if ((*lcladv & advmsk) != tgtadv)
4671 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4674 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4676 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4680 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4681 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4682 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4683 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4684 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4686 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4689 if (tg3_ctrl != tgtadv)
4696 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4700 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4703 if (tg3_readphy(tp, MII_STAT1000, &val))
4706 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4709 if (tg3_readphy(tp, MII_LPA, rmtadv))
4712 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4713 tp->link_config.rmt_adv = lpeth;
4718 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4720 if (curr_link_up != tp->link_up) {
4722 netif_carrier_on(tp->dev);
4724 netif_carrier_off(tp->dev);
4725 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4726 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4729 tg3_link_report(tp);
4736 static void tg3_clear_mac_status(struct tg3 *tp)
4741 MAC_STATUS_SYNC_CHANGED |
4742 MAC_STATUS_CFG_CHANGED |
4743 MAC_STATUS_MI_COMPLETION |
4744 MAC_STATUS_LNKSTATE_CHANGED);
4748 static void tg3_setup_eee(struct tg3 *tp)
4752 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4753 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4754 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4755 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4757 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4759 tw32_f(TG3_CPMU_EEE_CTRL,
4760 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4762 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4763 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4764 TG3_CPMU_EEEMD_LPI_IN_RX |
4765 TG3_CPMU_EEEMD_EEE_ENABLE;
4767 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4768 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4770 if (tg3_flag(tp, ENABLE_APE))
4771 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4773 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4775 tw32_f(TG3_CPMU_EEE_DBTMR1,
4776 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4777 (tp->eee.tx_lpi_timer & 0xffff));
4779 tw32_f(TG3_CPMU_EEE_DBTMR2,
4780 TG3_CPMU_DBTMR2_APE_TX_2047US |
4781 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4784 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4786 bool current_link_up;
4788 u32 lcl_adv, rmt_adv;
4793 tg3_clear_mac_status(tp);
4795 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4797 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4801 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4803 /* Some third-party PHYs need to be reset on link going
4806 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4807 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4808 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4810 tg3_readphy(tp, MII_BMSR, &bmsr);
4811 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4812 !(bmsr & BMSR_LSTATUS))
4818 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4819 tg3_readphy(tp, MII_BMSR, &bmsr);
4820 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4821 !tg3_flag(tp, INIT_COMPLETE))
4824 if (!(bmsr & BMSR_LSTATUS)) {
4825 err = tg3_init_5401phy_dsp(tp);
4829 tg3_readphy(tp, MII_BMSR, &bmsr);
4830 for (i = 0; i < 1000; i++) {
4832 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4833 (bmsr & BMSR_LSTATUS)) {
4839 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4840 TG3_PHY_REV_BCM5401_B0 &&
4841 !(bmsr & BMSR_LSTATUS) &&
4842 tp->link_config.active_speed == SPEED_1000) {
4843 err = tg3_phy_reset(tp);
4845 err = tg3_init_5401phy_dsp(tp);
4850 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4851 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4852 /* 5701 {A0,B0} CRC bug workaround */
4853 tg3_writephy(tp, 0x15, 0x0a75);
4854 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4855 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4856 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4859 /* Clear pending interrupts... */
4860 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4861 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4863 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4864 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4865 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4866 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4868 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4869 tg3_asic_rev(tp) == ASIC_REV_5701) {
4870 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4871 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4872 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4874 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4877 current_link_up = false;
4878 current_speed = SPEED_UNKNOWN;
4879 current_duplex = DUPLEX_UNKNOWN;
4880 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4881 tp->link_config.rmt_adv = 0;
4883 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4884 err = tg3_phy_auxctl_read(tp,
4885 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4887 if (!err && !(val & (1 << 10))) {
4888 tg3_phy_auxctl_write(tp,
4889 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4896 for (i = 0; i < 100; i++) {
4897 tg3_readphy(tp, MII_BMSR, &bmsr);
4898 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4899 (bmsr & BMSR_LSTATUS))
4904 if (bmsr & BMSR_LSTATUS) {
4907 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4908 for (i = 0; i < 2000; i++) {
4910 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4915 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4920 for (i = 0; i < 200; i++) {
4921 tg3_readphy(tp, MII_BMCR, &bmcr);
4922 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4924 if (bmcr && bmcr != 0x7fff)
4932 tp->link_config.active_speed = current_speed;
4933 tp->link_config.active_duplex = current_duplex;
4935 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4936 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4938 if ((bmcr & BMCR_ANENABLE) &&
4940 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4941 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4942 current_link_up = true;
4944 /* EEE settings changes take effect only after a phy
4945 * reset. If we have skipped a reset due to Link Flap
4946 * Avoidance being enabled, do it now.
4948 if (!eee_config_ok &&
4949 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4955 if (!(bmcr & BMCR_ANENABLE) &&
4956 tp->link_config.speed == current_speed &&
4957 tp->link_config.duplex == current_duplex) {
4958 current_link_up = true;
4962 if (current_link_up &&
4963 tp->link_config.active_duplex == DUPLEX_FULL) {
4966 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4967 reg = MII_TG3_FET_GEN_STAT;
4968 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4970 reg = MII_TG3_EXT_STAT;
4971 bit = MII_TG3_EXT_STAT_MDIX;
4974 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4975 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4977 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4982 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4983 tg3_phy_copper_begin(tp);
4985 if (tg3_flag(tp, ROBOSWITCH)) {
4986 current_link_up = true;
4987 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4988 current_speed = SPEED_1000;
4989 current_duplex = DUPLEX_FULL;
4990 tp->link_config.active_speed = current_speed;
4991 tp->link_config.active_duplex = current_duplex;
4994 tg3_readphy(tp, MII_BMSR, &bmsr);
4995 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4996 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4997 current_link_up = true;
5000 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5001 if (current_link_up) {
5002 if (tp->link_config.active_speed == SPEED_100 ||
5003 tp->link_config.active_speed == SPEED_10)
5004 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5006 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5007 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5008 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5010 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5012 /* In order for the 5750 core in BCM4785 chip to work properly
5013 * in RGMII mode, the Led Control Register must be set up.
5015 if (tg3_flag(tp, RGMII_MODE)) {
5016 u32 led_ctrl = tr32(MAC_LED_CTRL);
5017 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5019 if (tp->link_config.active_speed == SPEED_10)
5020 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5021 else if (tp->link_config.active_speed == SPEED_100)
5022 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5023 LED_CTRL_100MBPS_ON);
5024 else if (tp->link_config.active_speed == SPEED_1000)
5025 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5026 LED_CTRL_1000MBPS_ON);
5028 tw32(MAC_LED_CTRL, led_ctrl);
5032 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5033 if (tp->link_config.active_duplex == DUPLEX_HALF)
5034 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5036 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5037 if (current_link_up &&
5038 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5039 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5041 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5044 /* ??? Without this setting Netgear GA302T PHY does not
5045 * ??? send/receive packets...
5047 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5048 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5049 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5050 tw32_f(MAC_MI_MODE, tp->mi_mode);
5054 tw32_f(MAC_MODE, tp->mac_mode);
5057 tg3_phy_eee_adjust(tp, current_link_up);
5059 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5060 /* Polled via timer. */
5061 tw32_f(MAC_EVENT, 0);
5063 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5067 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5069 tp->link_config.active_speed == SPEED_1000 &&
5070 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5073 (MAC_STATUS_SYNC_CHANGED |
5074 MAC_STATUS_CFG_CHANGED));
5077 NIC_SRAM_FIRMWARE_MBOX,
5078 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5081 /* Prevent send BD corruption. */
5082 if (tg3_flag(tp, CLKREQ_BUG)) {
5083 if (tp->link_config.active_speed == SPEED_100 ||
5084 tp->link_config.active_speed == SPEED_10)
5085 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5086 PCI_EXP_LNKCTL_CLKREQ_EN);
5088 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5089 PCI_EXP_LNKCTL_CLKREQ_EN);
5092 tg3_test_and_report_link_chg(tp, current_link_up);
5097 struct tg3_fiber_aneginfo {
5099 #define ANEG_STATE_UNKNOWN 0
5100 #define ANEG_STATE_AN_ENABLE 1
5101 #define ANEG_STATE_RESTART_INIT 2
5102 #define ANEG_STATE_RESTART 3
5103 #define ANEG_STATE_DISABLE_LINK_OK 4
5104 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5105 #define ANEG_STATE_ABILITY_DETECT 6
5106 #define ANEG_STATE_ACK_DETECT_INIT 7
5107 #define ANEG_STATE_ACK_DETECT 8
5108 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5109 #define ANEG_STATE_COMPLETE_ACK 10
5110 #define ANEG_STATE_IDLE_DETECT_INIT 11
5111 #define ANEG_STATE_IDLE_DETECT 12
5112 #define ANEG_STATE_LINK_OK 13
5113 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5114 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5117 #define MR_AN_ENABLE 0x00000001
5118 #define MR_RESTART_AN 0x00000002
5119 #define MR_AN_COMPLETE 0x00000004
5120 #define MR_PAGE_RX 0x00000008
5121 #define MR_NP_LOADED 0x00000010
5122 #define MR_TOGGLE_TX 0x00000020
5123 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5124 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5125 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5126 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5127 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5128 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5129 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5130 #define MR_TOGGLE_RX 0x00002000
5131 #define MR_NP_RX 0x00004000
5133 #define MR_LINK_OK 0x80000000
5135 unsigned long link_time, cur_time;
5137 u32 ability_match_cfg;
5138 int ability_match_count;
5140 char ability_match, idle_match, ack_match;
5142 u32 txconfig, rxconfig;
5143 #define ANEG_CFG_NP 0x00000080
5144 #define ANEG_CFG_ACK 0x00000040
5145 #define ANEG_CFG_RF2 0x00000020
5146 #define ANEG_CFG_RF1 0x00000010
5147 #define ANEG_CFG_PS2 0x00000001
5148 #define ANEG_CFG_PS1 0x00008000
5149 #define ANEG_CFG_HD 0x00004000
5150 #define ANEG_CFG_FD 0x00002000
5151 #define ANEG_CFG_INVAL 0x00001f06
5156 #define ANEG_TIMER_ENAB 2
5157 #define ANEG_FAILED -1
5159 #define ANEG_STATE_SETTLE_TIME 10000
5161 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5162 struct tg3_fiber_aneginfo *ap)
5165 unsigned long delta;
5169 if (ap->state == ANEG_STATE_UNKNOWN) {
5173 ap->ability_match_cfg = 0;
5174 ap->ability_match_count = 0;
5175 ap->ability_match = 0;
5181 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5182 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5184 if (rx_cfg_reg != ap->ability_match_cfg) {
5185 ap->ability_match_cfg = rx_cfg_reg;
5186 ap->ability_match = 0;
5187 ap->ability_match_count = 0;
5189 if (++ap->ability_match_count > 1) {
5190 ap->ability_match = 1;
5191 ap->ability_match_cfg = rx_cfg_reg;
5194 if (rx_cfg_reg & ANEG_CFG_ACK)
5202 ap->ability_match_cfg = 0;
5203 ap->ability_match_count = 0;
5204 ap->ability_match = 0;
5210 ap->rxconfig = rx_cfg_reg;
5213 switch (ap->state) {
5214 case ANEG_STATE_UNKNOWN:
5215 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5216 ap->state = ANEG_STATE_AN_ENABLE;
5219 case ANEG_STATE_AN_ENABLE:
5220 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5221 if (ap->flags & MR_AN_ENABLE) {
5224 ap->ability_match_cfg = 0;
5225 ap->ability_match_count = 0;
5226 ap->ability_match = 0;
5230 ap->state = ANEG_STATE_RESTART_INIT;
5232 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5236 case ANEG_STATE_RESTART_INIT:
5237 ap->link_time = ap->cur_time;
5238 ap->flags &= ~(MR_NP_LOADED);
5240 tw32(MAC_TX_AUTO_NEG, 0);
5241 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5242 tw32_f(MAC_MODE, tp->mac_mode);
5245 ret = ANEG_TIMER_ENAB;
5246 ap->state = ANEG_STATE_RESTART;
5249 case ANEG_STATE_RESTART:
5250 delta = ap->cur_time - ap->link_time;
5251 if (delta > ANEG_STATE_SETTLE_TIME)
5252 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5254 ret = ANEG_TIMER_ENAB;
5257 case ANEG_STATE_DISABLE_LINK_OK:
5261 case ANEG_STATE_ABILITY_DETECT_INIT:
5262 ap->flags &= ~(MR_TOGGLE_TX);
5263 ap->txconfig = ANEG_CFG_FD;
5264 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5265 if (flowctrl & ADVERTISE_1000XPAUSE)
5266 ap->txconfig |= ANEG_CFG_PS1;
5267 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5268 ap->txconfig |= ANEG_CFG_PS2;
5269 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5270 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5271 tw32_f(MAC_MODE, tp->mac_mode);
5274 ap->state = ANEG_STATE_ABILITY_DETECT;
5277 case ANEG_STATE_ABILITY_DETECT:
5278 if (ap->ability_match != 0 && ap->rxconfig != 0)
5279 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5282 case ANEG_STATE_ACK_DETECT_INIT:
5283 ap->txconfig |= ANEG_CFG_ACK;
5284 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5285 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5286 tw32_f(MAC_MODE, tp->mac_mode);
5289 ap->state = ANEG_STATE_ACK_DETECT;
5292 case ANEG_STATE_ACK_DETECT:
5293 if (ap->ack_match != 0) {
5294 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5295 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5296 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5298 ap->state = ANEG_STATE_AN_ENABLE;
5300 } else if (ap->ability_match != 0 &&
5301 ap->rxconfig == 0) {
5302 ap->state = ANEG_STATE_AN_ENABLE;
5306 case ANEG_STATE_COMPLETE_ACK_INIT:
5307 if (ap->rxconfig & ANEG_CFG_INVAL) {
5311 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5312 MR_LP_ADV_HALF_DUPLEX |
5313 MR_LP_ADV_SYM_PAUSE |
5314 MR_LP_ADV_ASYM_PAUSE |
5315 MR_LP_ADV_REMOTE_FAULT1 |
5316 MR_LP_ADV_REMOTE_FAULT2 |
5317 MR_LP_ADV_NEXT_PAGE |
5320 if (ap->rxconfig & ANEG_CFG_FD)
5321 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5322 if (ap->rxconfig & ANEG_CFG_HD)
5323 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5324 if (ap->rxconfig & ANEG_CFG_PS1)
5325 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5326 if (ap->rxconfig & ANEG_CFG_PS2)
5327 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5328 if (ap->rxconfig & ANEG_CFG_RF1)
5329 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5330 if (ap->rxconfig & ANEG_CFG_RF2)
5331 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5332 if (ap->rxconfig & ANEG_CFG_NP)
5333 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5335 ap->link_time = ap->cur_time;
5337 ap->flags ^= (MR_TOGGLE_TX);
5338 if (ap->rxconfig & 0x0008)
5339 ap->flags |= MR_TOGGLE_RX;
5340 if (ap->rxconfig & ANEG_CFG_NP)
5341 ap->flags |= MR_NP_RX;
5342 ap->flags |= MR_PAGE_RX;
5344 ap->state = ANEG_STATE_COMPLETE_ACK;
5345 ret = ANEG_TIMER_ENAB;
5348 case ANEG_STATE_COMPLETE_ACK:
5349 if (ap->ability_match != 0 &&
5350 ap->rxconfig == 0) {
5351 ap->state = ANEG_STATE_AN_ENABLE;
5354 delta = ap->cur_time - ap->link_time;
5355 if (delta > ANEG_STATE_SETTLE_TIME) {
5356 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5357 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5359 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5360 !(ap->flags & MR_NP_RX)) {
5361 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5369 case ANEG_STATE_IDLE_DETECT_INIT:
5370 ap->link_time = ap->cur_time;
5371 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5372 tw32_f(MAC_MODE, tp->mac_mode);
5375 ap->state = ANEG_STATE_IDLE_DETECT;
5376 ret = ANEG_TIMER_ENAB;
5379 case ANEG_STATE_IDLE_DETECT:
5380 if (ap->ability_match != 0 &&
5381 ap->rxconfig == 0) {
5382 ap->state = ANEG_STATE_AN_ENABLE;
5385 delta = ap->cur_time - ap->link_time;
5386 if (delta > ANEG_STATE_SETTLE_TIME) {
5387 /* XXX another gem from the Broadcom driver :( */
5388 ap->state = ANEG_STATE_LINK_OK;
5392 case ANEG_STATE_LINK_OK:
5393 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5397 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5398 /* ??? unimplemented */
5401 case ANEG_STATE_NEXT_PAGE_WAIT:
5402 /* ??? unimplemented */
5413 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5416 struct tg3_fiber_aneginfo aninfo;
5417 int status = ANEG_FAILED;
5421 tw32_f(MAC_TX_AUTO_NEG, 0);
5423 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5424 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5427 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5430 memset(&aninfo, 0, sizeof(aninfo));
5431 aninfo.flags |= MR_AN_ENABLE;
5432 aninfo.state = ANEG_STATE_UNKNOWN;
5433 aninfo.cur_time = 0;
5435 while (++tick < 195000) {
5436 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5437 if (status == ANEG_DONE || status == ANEG_FAILED)
5443 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5444 tw32_f(MAC_MODE, tp->mac_mode);
5447 *txflags = aninfo.txconfig;
5448 *rxflags = aninfo.flags;
5450 if (status == ANEG_DONE &&
5451 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5452 MR_LP_ADV_FULL_DUPLEX)))
5458 static void tg3_init_bcm8002(struct tg3 *tp)
5460 u32 mac_status = tr32(MAC_STATUS);
5463 /* Reset when initting first time or we have a link. */
5464 if (tg3_flag(tp, INIT_COMPLETE) &&
5465 !(mac_status & MAC_STATUS_PCS_SYNCED))
5468 /* Set PLL lock range. */
5469 tg3_writephy(tp, 0x16, 0x8007);
5472 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5474 /* Wait for reset to complete. */
5475 /* XXX schedule_timeout() ... */
5476 for (i = 0; i < 500; i++)
5479 /* Config mode; select PMA/Ch 1 regs. */
5480 tg3_writephy(tp, 0x10, 0x8411);
5482 /* Enable auto-lock and comdet, select txclk for tx. */
5483 tg3_writephy(tp, 0x11, 0x0a10);
5485 tg3_writephy(tp, 0x18, 0x00a0);
5486 tg3_writephy(tp, 0x16, 0x41ff);
5488 /* Assert and deassert POR. */
5489 tg3_writephy(tp, 0x13, 0x0400);
5491 tg3_writephy(tp, 0x13, 0x0000);
5493 tg3_writephy(tp, 0x11, 0x0a50);
5495 tg3_writephy(tp, 0x11, 0x0a10);
5497 /* Wait for signal to stabilize */
5498 /* XXX schedule_timeout() ... */
5499 for (i = 0; i < 15000; i++)
5502 /* Deselect the channel register so we can read the PHYID
5505 tg3_writephy(tp, 0x10, 0x8011);
5508 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5511 bool current_link_up;
5512 u32 sg_dig_ctrl, sg_dig_status;
5513 u32 serdes_cfg, expected_sg_dig_ctrl;
5514 int workaround, port_a;
5517 expected_sg_dig_ctrl = 0;
5520 current_link_up = false;
5522 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5523 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5525 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5528 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5529 /* preserve bits 20-23 for voltage regulator */
5530 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5533 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5535 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5536 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5538 u32 val = serdes_cfg;
5544 tw32_f(MAC_SERDES_CFG, val);
5547 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5549 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5550 tg3_setup_flow_control(tp, 0, 0);
5551 current_link_up = true;
5556 /* Want auto-negotiation. */
5557 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5559 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5560 if (flowctrl & ADVERTISE_1000XPAUSE)
5561 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5562 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5563 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5565 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5566 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5567 tp->serdes_counter &&
5568 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5569 MAC_STATUS_RCVD_CFG)) ==
5570 MAC_STATUS_PCS_SYNCED)) {
5571 tp->serdes_counter--;
5572 current_link_up = true;
5577 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5578 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5580 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5582 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5583 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5584 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5585 MAC_STATUS_SIGNAL_DET)) {
5586 sg_dig_status = tr32(SG_DIG_STATUS);
5587 mac_status = tr32(MAC_STATUS);
5589 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5590 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5591 u32 local_adv = 0, remote_adv = 0;
5593 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5594 local_adv |= ADVERTISE_1000XPAUSE;
5595 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5596 local_adv |= ADVERTISE_1000XPSE_ASYM;
5598 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5599 remote_adv |= LPA_1000XPAUSE;
5600 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5601 remote_adv |= LPA_1000XPAUSE_ASYM;
5603 tp->link_config.rmt_adv =
5604 mii_adv_to_ethtool_adv_x(remote_adv);
5606 tg3_setup_flow_control(tp, local_adv, remote_adv);
5607 current_link_up = true;
5608 tp->serdes_counter = 0;
5609 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5610 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5611 if (tp->serdes_counter)
5612 tp->serdes_counter--;
5615 u32 val = serdes_cfg;
5622 tw32_f(MAC_SERDES_CFG, val);
5625 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5628 /* Link parallel detection - link is up */
5629 /* only if we have PCS_SYNC and not */
5630 /* receiving config code words */
5631 mac_status = tr32(MAC_STATUS);
5632 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5633 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5634 tg3_setup_flow_control(tp, 0, 0);
5635 current_link_up = true;
5637 TG3_PHYFLG_PARALLEL_DETECT;
5638 tp->serdes_counter =
5639 SERDES_PARALLEL_DET_TIMEOUT;
5641 goto restart_autoneg;
5645 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5646 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5650 return current_link_up;
5653 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5655 bool current_link_up = false;
5657 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5660 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5661 u32 txflags, rxflags;
5664 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5665 u32 local_adv = 0, remote_adv = 0;
5667 if (txflags & ANEG_CFG_PS1)
5668 local_adv |= ADVERTISE_1000XPAUSE;
5669 if (txflags & ANEG_CFG_PS2)
5670 local_adv |= ADVERTISE_1000XPSE_ASYM;
5672 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5673 remote_adv |= LPA_1000XPAUSE;
5674 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5675 remote_adv |= LPA_1000XPAUSE_ASYM;
5677 tp->link_config.rmt_adv =
5678 mii_adv_to_ethtool_adv_x(remote_adv);
5680 tg3_setup_flow_control(tp, local_adv, remote_adv);
5682 current_link_up = true;
5684 for (i = 0; i < 30; i++) {
5687 (MAC_STATUS_SYNC_CHANGED |
5688 MAC_STATUS_CFG_CHANGED));
5690 if ((tr32(MAC_STATUS) &
5691 (MAC_STATUS_SYNC_CHANGED |
5692 MAC_STATUS_CFG_CHANGED)) == 0)
5696 mac_status = tr32(MAC_STATUS);
5697 if (!current_link_up &&
5698 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5699 !(mac_status & MAC_STATUS_RCVD_CFG))
5700 current_link_up = true;
5702 tg3_setup_flow_control(tp, 0, 0);
5704 /* Forcing 1000FD link up. */
5705 current_link_up = true;
5707 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5710 tw32_f(MAC_MODE, tp->mac_mode);
5715 return current_link_up;
5718 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5721 u32 orig_active_speed;
5722 u8 orig_active_duplex;
5724 bool current_link_up;
5727 orig_pause_cfg = tp->link_config.active_flowctrl;
5728 orig_active_speed = tp->link_config.active_speed;
5729 orig_active_duplex = tp->link_config.active_duplex;
5731 if (!tg3_flag(tp, HW_AUTONEG) &&
5733 tg3_flag(tp, INIT_COMPLETE)) {
5734 mac_status = tr32(MAC_STATUS);
5735 mac_status &= (MAC_STATUS_PCS_SYNCED |
5736 MAC_STATUS_SIGNAL_DET |
5737 MAC_STATUS_CFG_CHANGED |
5738 MAC_STATUS_RCVD_CFG);
5739 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5740 MAC_STATUS_SIGNAL_DET)) {
5741 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5742 MAC_STATUS_CFG_CHANGED));
5747 tw32_f(MAC_TX_AUTO_NEG, 0);
5749 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5750 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5751 tw32_f(MAC_MODE, tp->mac_mode);
5754 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5755 tg3_init_bcm8002(tp);
5757 /* Enable link change event even when serdes polling. */
5758 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5761 current_link_up = false;
5762 tp->link_config.rmt_adv = 0;
5763 mac_status = tr32(MAC_STATUS);
5765 if (tg3_flag(tp, HW_AUTONEG))
5766 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5768 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5770 tp->napi[0].hw_status->status =
5771 (SD_STATUS_UPDATED |
5772 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5774 for (i = 0; i < 100; i++) {
5775 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5776 MAC_STATUS_CFG_CHANGED));
5778 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5779 MAC_STATUS_CFG_CHANGED |
5780 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5784 mac_status = tr32(MAC_STATUS);
5785 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5786 current_link_up = false;
5787 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5788 tp->serdes_counter == 0) {
5789 tw32_f(MAC_MODE, (tp->mac_mode |
5790 MAC_MODE_SEND_CONFIGS));
5792 tw32_f(MAC_MODE, tp->mac_mode);
5796 if (current_link_up) {
5797 tp->link_config.active_speed = SPEED_1000;
5798 tp->link_config.active_duplex = DUPLEX_FULL;
5799 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5800 LED_CTRL_LNKLED_OVERRIDE |
5801 LED_CTRL_1000MBPS_ON));
5803 tp->link_config.active_speed = SPEED_UNKNOWN;
5804 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5805 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5806 LED_CTRL_LNKLED_OVERRIDE |
5807 LED_CTRL_TRAFFIC_OVERRIDE));
5810 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5811 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5812 if (orig_pause_cfg != now_pause_cfg ||
5813 orig_active_speed != tp->link_config.active_speed ||
5814 orig_active_duplex != tp->link_config.active_duplex)
5815 tg3_link_report(tp);
5821 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5825 u32 current_speed = SPEED_UNKNOWN;
5826 u8 current_duplex = DUPLEX_UNKNOWN;
5827 bool current_link_up = false;
5828 u32 local_adv, remote_adv, sgsr;
5830 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5831 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5832 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5833 (sgsr & SERDES_TG3_SGMII_MODE)) {
5838 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5840 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5841 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5843 current_link_up = true;
5844 if (sgsr & SERDES_TG3_SPEED_1000) {
5845 current_speed = SPEED_1000;
5846 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5847 } else if (sgsr & SERDES_TG3_SPEED_100) {
5848 current_speed = SPEED_100;
5849 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5851 current_speed = SPEED_10;
5852 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5855 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5856 current_duplex = DUPLEX_FULL;
5858 current_duplex = DUPLEX_HALF;
5861 tw32_f(MAC_MODE, tp->mac_mode);
5864 tg3_clear_mac_status(tp);
5866 goto fiber_setup_done;
5869 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5870 tw32_f(MAC_MODE, tp->mac_mode);
5873 tg3_clear_mac_status(tp);
5878 tp->link_config.rmt_adv = 0;
5880 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5881 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5882 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5883 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5884 bmsr |= BMSR_LSTATUS;
5886 bmsr &= ~BMSR_LSTATUS;
5889 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5891 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5892 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5893 /* do nothing, just check for link up at the end */
5894 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5897 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5898 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5899 ADVERTISE_1000XPAUSE |
5900 ADVERTISE_1000XPSE_ASYM |
5903 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5904 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5906 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5907 tg3_writephy(tp, MII_ADVERTISE, newadv);
5908 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5909 tg3_writephy(tp, MII_BMCR, bmcr);
5911 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5912 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5913 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5920 bmcr &= ~BMCR_SPEED1000;
5921 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5923 if (tp->link_config.duplex == DUPLEX_FULL)
5924 new_bmcr |= BMCR_FULLDPLX;
5926 if (new_bmcr != bmcr) {
5927 /* BMCR_SPEED1000 is a reserved bit that needs
5928 * to be set on write.
5930 new_bmcr |= BMCR_SPEED1000;
5932 /* Force a linkdown */
5936 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5937 adv &= ~(ADVERTISE_1000XFULL |
5938 ADVERTISE_1000XHALF |
5940 tg3_writephy(tp, MII_ADVERTISE, adv);
5941 tg3_writephy(tp, MII_BMCR, bmcr |
5945 tg3_carrier_off(tp);
5947 tg3_writephy(tp, MII_BMCR, new_bmcr);
5949 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5950 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5951 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5952 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5953 bmsr |= BMSR_LSTATUS;
5955 bmsr &= ~BMSR_LSTATUS;
5957 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5961 if (bmsr & BMSR_LSTATUS) {
5962 current_speed = SPEED_1000;
5963 current_link_up = true;
5964 if (bmcr & BMCR_FULLDPLX)
5965 current_duplex = DUPLEX_FULL;
5967 current_duplex = DUPLEX_HALF;
5972 if (bmcr & BMCR_ANENABLE) {
5975 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5976 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5977 common = local_adv & remote_adv;
5978 if (common & (ADVERTISE_1000XHALF |
5979 ADVERTISE_1000XFULL)) {
5980 if (common & ADVERTISE_1000XFULL)
5981 current_duplex = DUPLEX_FULL;
5983 current_duplex = DUPLEX_HALF;
5985 tp->link_config.rmt_adv =
5986 mii_adv_to_ethtool_adv_x(remote_adv);
5987 } else if (!tg3_flag(tp, 5780_CLASS)) {
5988 /* Link is up via parallel detect */
5990 current_link_up = false;
5996 if (current_link_up && current_duplex == DUPLEX_FULL)
5997 tg3_setup_flow_control(tp, local_adv, remote_adv);
5999 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6000 if (tp->link_config.active_duplex == DUPLEX_HALF)
6001 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
6003 tw32_f(MAC_MODE, tp->mac_mode);
6006 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6008 tp->link_config.active_speed = current_speed;
6009 tp->link_config.active_duplex = current_duplex;
6011 tg3_test_and_report_link_chg(tp, current_link_up);
6015 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6017 if (tp->serdes_counter) {
6018 /* Give autoneg time to complete. */
6019 tp->serdes_counter--;
6024 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6027 tg3_readphy(tp, MII_BMCR, &bmcr);
6028 if (bmcr & BMCR_ANENABLE) {
6031 /* Select shadow register 0x1f */
6032 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6033 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6035 /* Select expansion interrupt status register */
6036 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6037 MII_TG3_DSP_EXP1_INT_STAT);
6038 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6039 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6041 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6042 /* We have signal detect and not receiving
6043 * config code words, link is up by parallel
6047 bmcr &= ~BMCR_ANENABLE;
6048 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6049 tg3_writephy(tp, MII_BMCR, bmcr);
6050 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6053 } else if (tp->link_up &&
6054 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6055 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6058 /* Select expansion interrupt status register */
6059 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6060 MII_TG3_DSP_EXP1_INT_STAT);
6061 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6065 /* Config code words received, turn on autoneg. */
6066 tg3_readphy(tp, MII_BMCR, &bmcr);
6067 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6069 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6075 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6080 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6081 err = tg3_setup_fiber_phy(tp, force_reset);
6082 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6083 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6085 err = tg3_setup_copper_phy(tp, force_reset);
6087 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6090 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6091 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6093 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6098 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6099 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6100 tw32(GRC_MISC_CFG, val);
6103 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6104 (6 << TX_LENGTHS_IPG_SHIFT);
6105 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6106 tg3_asic_rev(tp) == ASIC_REV_5762)
6107 val |= tr32(MAC_TX_LENGTHS) &
6108 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6109 TX_LENGTHS_CNT_DWN_VAL_MSK);
6111 if (tp->link_config.active_speed == SPEED_1000 &&
6112 tp->link_config.active_duplex == DUPLEX_HALF)
6113 tw32(MAC_TX_LENGTHS, val |
6114 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6116 tw32(MAC_TX_LENGTHS, val |
6117 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6119 if (!tg3_flag(tp, 5705_PLUS)) {
6121 tw32(HOSTCC_STAT_COAL_TICKS,
6122 tp->coal.stats_block_coalesce_usecs);
6124 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6128 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6129 val = tr32(PCIE_PWR_MGMT_THRESH);
6131 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6134 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6135 tw32(PCIE_PWR_MGMT_THRESH, val);
6141 /* tp->lock must be held */
6142 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6146 ptp_read_system_prets(sts);
6147 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6148 ptp_read_system_postts(sts);
6149 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6154 /* tp->lock must be held */
6155 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6157 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6159 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6160 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6161 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6162 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6165 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6166 static inline void tg3_full_unlock(struct tg3 *tp);
6167 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6169 struct tg3 *tp = netdev_priv(dev);
6171 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6172 SOF_TIMESTAMPING_RX_SOFTWARE |
6173 SOF_TIMESTAMPING_SOFTWARE;
6175 if (tg3_flag(tp, PTP_CAPABLE)) {
6176 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6177 SOF_TIMESTAMPING_RX_HARDWARE |
6178 SOF_TIMESTAMPING_RAW_HARDWARE;
6182 info->phc_index = ptp_clock_index(tp->ptp_clock);
6184 info->phc_index = -1;
6186 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6188 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6189 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6190 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6191 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6195 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6197 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6198 bool neg_adj = false;
6206 /* Frequency adjustment is performed using hardware with a 24 bit
6207 * accumulator and a programmable correction value. On each clk, the
6208 * correction value gets added to the accumulator and when it
6209 * overflows, the time counter is incremented/decremented.
6211 * So conversion from ppb to correction value is
6212 * ppb * (1 << 24) / 1000000000
6214 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6215 TG3_EAV_REF_CLK_CORRECT_MASK;
6217 tg3_full_lock(tp, 0);
6220 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6221 TG3_EAV_REF_CLK_CORRECT_EN |
6222 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6224 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6226 tg3_full_unlock(tp);
6231 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6233 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6235 tg3_full_lock(tp, 0);
6236 tp->ptp_adjust += delta;
6237 tg3_full_unlock(tp);
6242 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6243 struct ptp_system_timestamp *sts)
6246 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6248 tg3_full_lock(tp, 0);
6249 ns = tg3_refclk_read(tp, sts);
6250 ns += tp->ptp_adjust;
6251 tg3_full_unlock(tp);
6253 *ts = ns_to_timespec64(ns);
6258 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6259 const struct timespec64 *ts)
6262 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6264 ns = timespec64_to_ns(ts);
6266 tg3_full_lock(tp, 0);
6267 tg3_refclk_write(tp, ns);
6269 tg3_full_unlock(tp);
6274 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6275 struct ptp_clock_request *rq, int on)
6277 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6282 case PTP_CLK_REQ_PEROUT:
6283 if (rq->perout.index != 0)
6286 tg3_full_lock(tp, 0);
6287 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6288 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6293 nsec = rq->perout.start.sec * 1000000000ULL +
6294 rq->perout.start.nsec;
6296 if (rq->perout.period.sec || rq->perout.period.nsec) {
6297 netdev_warn(tp->dev,
6298 "Device supports only a one-shot timesync output, period must be 0\n");
6303 if (nsec & (1ULL << 63)) {
6304 netdev_warn(tp->dev,
6305 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6310 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6311 tw32(TG3_EAV_WATCHDOG0_MSB,
6312 TG3_EAV_WATCHDOG0_EN |
6313 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6315 tw32(TG3_EAV_REF_CLCK_CTL,
6316 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6318 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6319 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6323 tg3_full_unlock(tp);
6333 static const struct ptp_clock_info tg3_ptp_caps = {
6334 .owner = THIS_MODULE,
6335 .name = "tg3 clock",
6336 .max_adj = 250000000,
6342 .adjfreq = tg3_ptp_adjfreq,
6343 .adjtime = tg3_ptp_adjtime,
6344 .gettimex64 = tg3_ptp_gettimex,
6345 .settime64 = tg3_ptp_settime,
6346 .enable = tg3_ptp_enable,
6349 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6350 struct skb_shared_hwtstamps *timestamp)
6352 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6353 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6357 /* tp->lock must be held */
6358 static void tg3_ptp_init(struct tg3 *tp)
6360 if (!tg3_flag(tp, PTP_CAPABLE))
6363 /* Initialize the hardware clock to the system time. */
6364 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6366 tp->ptp_info = tg3_ptp_caps;
6369 /* tp->lock must be held */
6370 static void tg3_ptp_resume(struct tg3 *tp)
6372 if (!tg3_flag(tp, PTP_CAPABLE))
6375 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6379 static void tg3_ptp_fini(struct tg3 *tp)
6381 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6384 ptp_clock_unregister(tp->ptp_clock);
6385 tp->ptp_clock = NULL;
6389 static inline int tg3_irq_sync(struct tg3 *tp)
6391 return tp->irq_sync;
6394 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6398 dst = (u32 *)((u8 *)dst + off);
6399 for (i = 0; i < len; i += sizeof(u32))
6400 *dst++ = tr32(off + i);
6403 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6405 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6406 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6407 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6408 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6409 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6410 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6411 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6412 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6413 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6414 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6415 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6416 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6417 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6418 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6419 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6420 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6421 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6422 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6423 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6425 if (tg3_flag(tp, SUPPORT_MSIX))
6426 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6428 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6429 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6430 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6431 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6432 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6433 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6434 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6435 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6437 if (!tg3_flag(tp, 5705_PLUS)) {
6438 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6439 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6440 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6443 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6444 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6445 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6446 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6447 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6449 if (tg3_flag(tp, NVRAM))
6450 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6453 static void tg3_dump_state(struct tg3 *tp)
6458 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6462 if (tg3_flag(tp, PCI_EXPRESS)) {
6463 /* Read up to but not including private PCI registers */
6464 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6465 regs[i / sizeof(u32)] = tr32(i);
6467 tg3_dump_legacy_regs(tp, regs);
6469 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6470 if (!regs[i + 0] && !regs[i + 1] &&
6471 !regs[i + 2] && !regs[i + 3])
6474 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6476 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6481 for (i = 0; i < tp->irq_cnt; i++) {
6482 struct tg3_napi *tnapi = &tp->napi[i];
6484 /* SW status block */
6486 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6488 tnapi->hw_status->status,
6489 tnapi->hw_status->status_tag,
6490 tnapi->hw_status->rx_jumbo_consumer,
6491 tnapi->hw_status->rx_consumer,
6492 tnapi->hw_status->rx_mini_consumer,
6493 tnapi->hw_status->idx[0].rx_producer,
6494 tnapi->hw_status->idx[0].tx_consumer);
6497 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6499 tnapi->last_tag, tnapi->last_irq_tag,
6500 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6502 tnapi->prodring.rx_std_prod_idx,
6503 tnapi->prodring.rx_std_cons_idx,
6504 tnapi->prodring.rx_jmb_prod_idx,
6505 tnapi->prodring.rx_jmb_cons_idx);
6509 /* This is called whenever we suspect that the system chipset is re-
6510 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6511 * is bogus tx completions. We try to recover by setting the
6512 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6515 static void tg3_tx_recover(struct tg3 *tp)
6517 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6518 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6520 netdev_warn(tp->dev,
6521 "The system may be re-ordering memory-mapped I/O "
6522 "cycles to the network device, attempting to recover. "
6523 "Please report the problem to the driver maintainer "
6524 "and include system chipset information.\n");
6526 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6529 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6531 /* Tell compiler to fetch tx indices from memory. */
6533 return tnapi->tx_pending -
6534 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6537 /* Tigon3 never reports partial packet sends. So we do not
6538 * need special logic to handle SKBs that have not had all
6539 * of their frags sent yet, like SunGEM does.
6541 static void tg3_tx(struct tg3_napi *tnapi)
6543 struct tg3 *tp = tnapi->tp;
6544 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6545 u32 sw_idx = tnapi->tx_cons;
6546 struct netdev_queue *txq;
6547 int index = tnapi - tp->napi;
6548 unsigned int pkts_compl = 0, bytes_compl = 0;
6550 if (tg3_flag(tp, ENABLE_TSS))
6553 txq = netdev_get_tx_queue(tp->dev, index);
6555 while (sw_idx != hw_idx) {
6556 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6557 struct sk_buff *skb = ri->skb;
6560 if (unlikely(skb == NULL)) {
6565 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6566 struct skb_shared_hwtstamps timestamp;
6567 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6568 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6570 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
6572 skb_tstamp_tx(skb, ×tamp);
6575 pci_unmap_single(tp->pdev,
6576 dma_unmap_addr(ri, mapping),
6582 while (ri->fragmented) {
6583 ri->fragmented = false;
6584 sw_idx = NEXT_TX(sw_idx);
6585 ri = &tnapi->tx_buffers[sw_idx];
6588 sw_idx = NEXT_TX(sw_idx);
6590 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6591 ri = &tnapi->tx_buffers[sw_idx];
6592 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6595 pci_unmap_page(tp->pdev,
6596 dma_unmap_addr(ri, mapping),
6597 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6600 while (ri->fragmented) {
6601 ri->fragmented = false;
6602 sw_idx = NEXT_TX(sw_idx);
6603 ri = &tnapi->tx_buffers[sw_idx];
6606 sw_idx = NEXT_TX(sw_idx);
6610 bytes_compl += skb->len;
6612 dev_consume_skb_any(skb);
6614 if (unlikely(tx_bug)) {
6620 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6622 tnapi->tx_cons = sw_idx;
6624 /* Need to make the tx_cons update visible to tg3_start_xmit()
6625 * before checking for netif_queue_stopped(). Without the
6626 * memory barrier, there is a small possibility that tg3_start_xmit()
6627 * will miss it and cause the queue to be stopped forever.
6631 if (unlikely(netif_tx_queue_stopped(txq) &&
6632 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6633 __netif_tx_lock(txq, smp_processor_id());
6634 if (netif_tx_queue_stopped(txq) &&
6635 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6636 netif_tx_wake_queue(txq);
6637 __netif_tx_unlock(txq);
6641 static void tg3_frag_free(bool is_frag, void *data)
6644 skb_free_frag(data);
6649 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6651 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6652 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6657 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6658 map_sz, PCI_DMA_FROMDEVICE);
6659 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6664 /* Returns size of skb allocated or < 0 on error.
6666 * We only need to fill in the address because the other members
6667 * of the RX descriptor are invariant, see tg3_init_rings.
6669 * Note the purposeful assymetry of cpu vs. chip accesses. For
6670 * posting buffers we only dirty the first cache line of the RX
6671 * descriptor (containing the address). Whereas for the RX status
6672 * buffers the cpu only reads the last cacheline of the RX descriptor
6673 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6675 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6676 u32 opaque_key, u32 dest_idx_unmasked,
6677 unsigned int *frag_size)
6679 struct tg3_rx_buffer_desc *desc;
6680 struct ring_info *map;
6683 int skb_size, data_size, dest_idx;
6685 switch (opaque_key) {
6686 case RXD_OPAQUE_RING_STD:
6687 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6688 desc = &tpr->rx_std[dest_idx];
6689 map = &tpr->rx_std_buffers[dest_idx];
6690 data_size = tp->rx_pkt_map_sz;
6693 case RXD_OPAQUE_RING_JUMBO:
6694 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6695 desc = &tpr->rx_jmb[dest_idx].std;
6696 map = &tpr->rx_jmb_buffers[dest_idx];
6697 data_size = TG3_RX_JMB_MAP_SZ;
6704 /* Do not overwrite any of the map or rp information
6705 * until we are sure we can commit to a new buffer.
6707 * Callers depend upon this behavior and assume that
6708 * we leave everything unchanged if we fail.
6710 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6711 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6712 if (skb_size <= PAGE_SIZE) {
6713 data = netdev_alloc_frag(skb_size);
6714 *frag_size = skb_size;
6716 data = kmalloc(skb_size, GFP_ATOMIC);
6722 mapping = pci_map_single(tp->pdev,
6723 data + TG3_RX_OFFSET(tp),
6725 PCI_DMA_FROMDEVICE);
6726 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6727 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6732 dma_unmap_addr_set(map, mapping, mapping);
6734 desc->addr_hi = ((u64)mapping >> 32);
6735 desc->addr_lo = ((u64)mapping & 0xffffffff);
6740 /* We only need to move over in the address because the other
6741 * members of the RX descriptor are invariant. See notes above
6742 * tg3_alloc_rx_data for full details.
6744 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6745 struct tg3_rx_prodring_set *dpr,
6746 u32 opaque_key, int src_idx,
6747 u32 dest_idx_unmasked)
6749 struct tg3 *tp = tnapi->tp;
6750 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6751 struct ring_info *src_map, *dest_map;
6752 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6755 switch (opaque_key) {
6756 case RXD_OPAQUE_RING_STD:
6757 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6758 dest_desc = &dpr->rx_std[dest_idx];
6759 dest_map = &dpr->rx_std_buffers[dest_idx];
6760 src_desc = &spr->rx_std[src_idx];
6761 src_map = &spr->rx_std_buffers[src_idx];
6764 case RXD_OPAQUE_RING_JUMBO:
6765 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6766 dest_desc = &dpr->rx_jmb[dest_idx].std;
6767 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6768 src_desc = &spr->rx_jmb[src_idx].std;
6769 src_map = &spr->rx_jmb_buffers[src_idx];
6776 dest_map->data = src_map->data;
6777 dma_unmap_addr_set(dest_map, mapping,
6778 dma_unmap_addr(src_map, mapping));
6779 dest_desc->addr_hi = src_desc->addr_hi;
6780 dest_desc->addr_lo = src_desc->addr_lo;
6782 /* Ensure that the update to the skb happens after the physical
6783 * addresses have been transferred to the new BD location.
6787 src_map->data = NULL;
6790 /* The RX ring scheme is composed of multiple rings which post fresh
6791 * buffers to the chip, and one special ring the chip uses to report
6792 * status back to the host.
6794 * The special ring reports the status of received packets to the
6795 * host. The chip does not write into the original descriptor the
6796 * RX buffer was obtained from. The chip simply takes the original
6797 * descriptor as provided by the host, updates the status and length
6798 * field, then writes this into the next status ring entry.
6800 * Each ring the host uses to post buffers to the chip is described
6801 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6802 * it is first placed into the on-chip ram. When the packet's length
6803 * is known, it walks down the TG3_BDINFO entries to select the ring.
6804 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6805 * which is within the range of the new packet's length is chosen.
6807 * The "separate ring for rx status" scheme may sound queer, but it makes
6808 * sense from a cache coherency perspective. If only the host writes
6809 * to the buffer post rings, and only the chip writes to the rx status
6810 * rings, then cache lines never move beyond shared-modified state.
6811 * If both the host and chip were to write into the same ring, cache line
6812 * eviction could occur since both entities want it in an exclusive state.
6814 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6816 struct tg3 *tp = tnapi->tp;
6817 u32 work_mask, rx_std_posted = 0;
6818 u32 std_prod_idx, jmb_prod_idx;
6819 u32 sw_idx = tnapi->rx_rcb_ptr;
6822 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6824 hw_idx = *(tnapi->rx_rcb_prod_idx);
6826 * We need to order the read of hw_idx and the read of
6827 * the opaque cookie.
6832 std_prod_idx = tpr->rx_std_prod_idx;
6833 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6834 while (sw_idx != hw_idx && budget > 0) {
6835 struct ring_info *ri;
6836 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6838 struct sk_buff *skb;
6839 dma_addr_t dma_addr;
6840 u32 opaque_key, desc_idx, *post_ptr;
6844 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6845 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6846 if (opaque_key == RXD_OPAQUE_RING_STD) {
6847 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6848 dma_addr = dma_unmap_addr(ri, mapping);
6850 post_ptr = &std_prod_idx;
6852 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6853 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6854 dma_addr = dma_unmap_addr(ri, mapping);
6856 post_ptr = &jmb_prod_idx;
6858 goto next_pkt_nopost;
6860 work_mask |= opaque_key;
6862 if (desc->err_vlan & RXD_ERR_MASK) {
6864 tg3_recycle_rx(tnapi, tpr, opaque_key,
6865 desc_idx, *post_ptr);
6867 /* Other statistics kept track of by card. */
6872 prefetch(data + TG3_RX_OFFSET(tp));
6873 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6876 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6877 RXD_FLAG_PTPSTAT_PTPV1 ||
6878 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6879 RXD_FLAG_PTPSTAT_PTPV2) {
6880 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6881 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6884 if (len > TG3_RX_COPY_THRESH(tp)) {
6886 unsigned int frag_size;
6888 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6889 *post_ptr, &frag_size);
6893 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6894 PCI_DMA_FROMDEVICE);
6896 /* Ensure that the update to the data happens
6897 * after the usage of the old DMA mapping.
6903 skb = build_skb(data, frag_size);
6905 tg3_frag_free(frag_size != 0, data);
6906 goto drop_it_no_recycle;
6908 skb_reserve(skb, TG3_RX_OFFSET(tp));
6910 tg3_recycle_rx(tnapi, tpr, opaque_key,
6911 desc_idx, *post_ptr);
6913 skb = netdev_alloc_skb(tp->dev,
6914 len + TG3_RAW_IP_ALIGN);
6916 goto drop_it_no_recycle;
6918 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6919 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6921 data + TG3_RX_OFFSET(tp),
6923 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6928 tg3_hwclock_to_timestamp(tp, tstamp,
6929 skb_hwtstamps(skb));
6931 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6932 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6933 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6934 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6935 skb->ip_summed = CHECKSUM_UNNECESSARY;
6937 skb_checksum_none_assert(skb);
6939 skb->protocol = eth_type_trans(skb, tp->dev);
6941 if (len > (tp->dev->mtu + ETH_HLEN) &&
6942 skb->protocol != htons(ETH_P_8021Q) &&
6943 skb->protocol != htons(ETH_P_8021AD)) {
6944 dev_kfree_skb_any(skb);
6945 goto drop_it_no_recycle;
6948 if (desc->type_flags & RXD_FLAG_VLAN &&
6949 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6950 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6951 desc->err_vlan & RXD_VLAN_MASK);
6953 napi_gro_receive(&tnapi->napi, skb);
6961 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6962 tpr->rx_std_prod_idx = std_prod_idx &
6963 tp->rx_std_ring_mask;
6964 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6965 tpr->rx_std_prod_idx);
6966 work_mask &= ~RXD_OPAQUE_RING_STD;
6971 sw_idx &= tp->rx_ret_ring_mask;
6973 /* Refresh hw_idx to see if there is new work */
6974 if (sw_idx == hw_idx) {
6975 hw_idx = *(tnapi->rx_rcb_prod_idx);
6980 /* ACK the status ring. */
6981 tnapi->rx_rcb_ptr = sw_idx;
6982 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6984 /* Refill RX ring(s). */
6985 if (!tg3_flag(tp, ENABLE_RSS)) {
6986 /* Sync BD data before updating mailbox */
6989 if (work_mask & RXD_OPAQUE_RING_STD) {
6990 tpr->rx_std_prod_idx = std_prod_idx &
6991 tp->rx_std_ring_mask;
6992 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6993 tpr->rx_std_prod_idx);
6995 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6996 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6997 tp->rx_jmb_ring_mask;
6998 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6999 tpr->rx_jmb_prod_idx);
7001 } else if (work_mask) {
7002 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7003 * updated before the producer indices can be updated.
7007 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7008 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7010 if (tnapi != &tp->napi[1]) {
7011 tp->rx_refill = true;
7012 napi_schedule(&tp->napi[1].napi);
7019 static void tg3_poll_link(struct tg3 *tp)
7021 /* handle link change and other phy events */
7022 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7023 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7025 if (sblk->status & SD_STATUS_LINK_CHG) {
7026 sblk->status = SD_STATUS_UPDATED |
7027 (sblk->status & ~SD_STATUS_LINK_CHG);
7028 spin_lock(&tp->lock);
7029 if (tg3_flag(tp, USE_PHYLIB)) {
7031 (MAC_STATUS_SYNC_CHANGED |
7032 MAC_STATUS_CFG_CHANGED |
7033 MAC_STATUS_MI_COMPLETION |
7034 MAC_STATUS_LNKSTATE_CHANGED));
7037 tg3_setup_phy(tp, false);
7038 spin_unlock(&tp->lock);
7043 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7044 struct tg3_rx_prodring_set *dpr,
7045 struct tg3_rx_prodring_set *spr)
7047 u32 si, di, cpycnt, src_prod_idx;
7051 src_prod_idx = spr->rx_std_prod_idx;
7053 /* Make sure updates to the rx_std_buffers[] entries and the
7054 * standard producer index are seen in the correct order.
7058 if (spr->rx_std_cons_idx == src_prod_idx)
7061 if (spr->rx_std_cons_idx < src_prod_idx)
7062 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7064 cpycnt = tp->rx_std_ring_mask + 1 -
7065 spr->rx_std_cons_idx;
7067 cpycnt = min(cpycnt,
7068 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7070 si = spr->rx_std_cons_idx;
7071 di = dpr->rx_std_prod_idx;
7073 for (i = di; i < di + cpycnt; i++) {
7074 if (dpr->rx_std_buffers[i].data) {
7084 /* Ensure that updates to the rx_std_buffers ring and the
7085 * shadowed hardware producer ring from tg3_recycle_skb() are
7086 * ordered correctly WRT the skb check above.
7090 memcpy(&dpr->rx_std_buffers[di],
7091 &spr->rx_std_buffers[si],
7092 cpycnt * sizeof(struct ring_info));
7094 for (i = 0; i < cpycnt; i++, di++, si++) {
7095 struct tg3_rx_buffer_desc *sbd, *dbd;
7096 sbd = &spr->rx_std[si];
7097 dbd = &dpr->rx_std[di];
7098 dbd->addr_hi = sbd->addr_hi;
7099 dbd->addr_lo = sbd->addr_lo;
7102 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7103 tp->rx_std_ring_mask;
7104 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7105 tp->rx_std_ring_mask;
7109 src_prod_idx = spr->rx_jmb_prod_idx;
7111 /* Make sure updates to the rx_jmb_buffers[] entries and
7112 * the jumbo producer index are seen in the correct order.
7116 if (spr->rx_jmb_cons_idx == src_prod_idx)
7119 if (spr->rx_jmb_cons_idx < src_prod_idx)
7120 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7122 cpycnt = tp->rx_jmb_ring_mask + 1 -
7123 spr->rx_jmb_cons_idx;
7125 cpycnt = min(cpycnt,
7126 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7128 si = spr->rx_jmb_cons_idx;
7129 di = dpr->rx_jmb_prod_idx;
7131 for (i = di; i < di + cpycnt; i++) {
7132 if (dpr->rx_jmb_buffers[i].data) {
7142 /* Ensure that updates to the rx_jmb_buffers ring and the
7143 * shadowed hardware producer ring from tg3_recycle_skb() are
7144 * ordered correctly WRT the skb check above.
7148 memcpy(&dpr->rx_jmb_buffers[di],
7149 &spr->rx_jmb_buffers[si],
7150 cpycnt * sizeof(struct ring_info));
7152 for (i = 0; i < cpycnt; i++, di++, si++) {
7153 struct tg3_rx_buffer_desc *sbd, *dbd;
7154 sbd = &spr->rx_jmb[si].std;
7155 dbd = &dpr->rx_jmb[di].std;
7156 dbd->addr_hi = sbd->addr_hi;
7157 dbd->addr_lo = sbd->addr_lo;
7160 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7161 tp->rx_jmb_ring_mask;
7162 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7163 tp->rx_jmb_ring_mask;
7169 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7171 struct tg3 *tp = tnapi->tp;
7173 /* run TX completion thread */
7174 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7176 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7180 if (!tnapi->rx_rcb_prod_idx)
7183 /* run RX thread, within the bounds set by NAPI.
7184 * All RX "locking" is done by ensuring outside
7185 * code synchronizes with tg3->napi.poll()
7187 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7188 work_done += tg3_rx(tnapi, budget - work_done);
7190 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7191 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7193 u32 std_prod_idx = dpr->rx_std_prod_idx;
7194 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7196 tp->rx_refill = false;
7197 for (i = 1; i <= tp->rxq_cnt; i++)
7198 err |= tg3_rx_prodring_xfer(tp, dpr,
7199 &tp->napi[i].prodring);
7203 if (std_prod_idx != dpr->rx_std_prod_idx)
7204 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7205 dpr->rx_std_prod_idx);
7207 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7208 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7209 dpr->rx_jmb_prod_idx);
7212 tw32_f(HOSTCC_MODE, tp->coal_now);
7218 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7220 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7221 schedule_work(&tp->reset_task);
7224 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7226 cancel_work_sync(&tp->reset_task);
7227 tg3_flag_clear(tp, RESET_TASK_PENDING);
7228 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7231 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7233 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7234 struct tg3 *tp = tnapi->tp;
7236 struct tg3_hw_status *sblk = tnapi->hw_status;
7239 work_done = tg3_poll_work(tnapi, work_done, budget);
7241 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7244 if (unlikely(work_done >= budget))
7247 /* tp->last_tag is used in tg3_int_reenable() below
7248 * to tell the hw how much work has been processed,
7249 * so we must read it before checking for more work.
7251 tnapi->last_tag = sblk->status_tag;
7252 tnapi->last_irq_tag = tnapi->last_tag;
7255 /* check for RX/TX work to do */
7256 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7257 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7259 /* This test here is not race free, but will reduce
7260 * the number of interrupts by looping again.
7262 if (tnapi == &tp->napi[1] && tp->rx_refill)
7265 napi_complete_done(napi, work_done);
7266 /* Reenable interrupts. */
7267 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7269 /* This test here is synchronized by napi_schedule()
7270 * and napi_complete() to close the race condition.
7272 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7273 tw32(HOSTCC_MODE, tp->coalesce_mode |
7274 HOSTCC_MODE_ENABLE |
7281 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7285 /* work_done is guaranteed to be less than budget. */
7286 napi_complete(napi);
7287 tg3_reset_task_schedule(tp);
7291 static void tg3_process_error(struct tg3 *tp)
7294 bool real_error = false;
7296 if (tg3_flag(tp, ERROR_PROCESSED))
7299 /* Check Flow Attention register */
7300 val = tr32(HOSTCC_FLOW_ATTN);
7301 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7302 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7306 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7307 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7311 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7312 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7321 tg3_flag_set(tp, ERROR_PROCESSED);
7322 tg3_reset_task_schedule(tp);
7325 static int tg3_poll(struct napi_struct *napi, int budget)
7327 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7328 struct tg3 *tp = tnapi->tp;
7330 struct tg3_hw_status *sblk = tnapi->hw_status;
7333 if (sblk->status & SD_STATUS_ERROR)
7334 tg3_process_error(tp);
7338 work_done = tg3_poll_work(tnapi, work_done, budget);
7340 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7343 if (unlikely(work_done >= budget))
7346 if (tg3_flag(tp, TAGGED_STATUS)) {
7347 /* tp->last_tag is used in tg3_int_reenable() below
7348 * to tell the hw how much work has been processed,
7349 * so we must read it before checking for more work.
7351 tnapi->last_tag = sblk->status_tag;
7352 tnapi->last_irq_tag = tnapi->last_tag;
7355 sblk->status &= ~SD_STATUS_UPDATED;
7357 if (likely(!tg3_has_work(tnapi))) {
7358 napi_complete_done(napi, work_done);
7359 tg3_int_reenable(tnapi);
7364 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7368 /* work_done is guaranteed to be less than budget. */
7369 napi_complete(napi);
7370 tg3_reset_task_schedule(tp);
7374 static void tg3_napi_disable(struct tg3 *tp)
7378 for (i = tp->irq_cnt - 1; i >= 0; i--)
7379 napi_disable(&tp->napi[i].napi);
7382 static void tg3_napi_enable(struct tg3 *tp)
7386 for (i = 0; i < tp->irq_cnt; i++)
7387 napi_enable(&tp->napi[i].napi);
7390 static void tg3_napi_init(struct tg3 *tp)
7394 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7395 for (i = 1; i < tp->irq_cnt; i++)
7396 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7399 static void tg3_napi_fini(struct tg3 *tp)
7403 for (i = 0; i < tp->irq_cnt; i++)
7404 netif_napi_del(&tp->napi[i].napi);
7407 static inline void tg3_netif_stop(struct tg3 *tp)
7409 netif_trans_update(tp->dev); /* prevent tx timeout */
7410 tg3_napi_disable(tp);
7411 netif_carrier_off(tp->dev);
7412 netif_tx_disable(tp->dev);
7415 /* tp->lock must be held */
7416 static inline void tg3_netif_start(struct tg3 *tp)
7420 /* NOTE: unconditional netif_tx_wake_all_queues is only
7421 * appropriate so long as all callers are assured to
7422 * have free tx slots (such as after tg3_init_hw)
7424 netif_tx_wake_all_queues(tp->dev);
7427 netif_carrier_on(tp->dev);
7429 tg3_napi_enable(tp);
7430 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7431 tg3_enable_ints(tp);
7434 static void tg3_irq_quiesce(struct tg3 *tp)
7435 __releases(tp->lock)
7436 __acquires(tp->lock)
7440 BUG_ON(tp->irq_sync);
7445 spin_unlock_bh(&tp->lock);
7447 for (i = 0; i < tp->irq_cnt; i++)
7448 synchronize_irq(tp->napi[i].irq_vec);
7450 spin_lock_bh(&tp->lock);
7453 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7454 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7455 * with as well. Most of the time, this is not necessary except when
7456 * shutting down the device.
7458 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7460 spin_lock_bh(&tp->lock);
7462 tg3_irq_quiesce(tp);
7465 static inline void tg3_full_unlock(struct tg3 *tp)
7467 spin_unlock_bh(&tp->lock);
7470 /* One-shot MSI handler - Chip automatically disables interrupt
7471 * after sending MSI so driver doesn't have to do it.
7473 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7475 struct tg3_napi *tnapi = dev_id;
7476 struct tg3 *tp = tnapi->tp;
7478 prefetch(tnapi->hw_status);
7480 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7482 if (likely(!tg3_irq_sync(tp)))
7483 napi_schedule(&tnapi->napi);
7488 /* MSI ISR - No need to check for interrupt sharing and no need to
7489 * flush status block and interrupt mailbox. PCI ordering rules
7490 * guarantee that MSI will arrive after the status block.
7492 static irqreturn_t tg3_msi(int irq, void *dev_id)
7494 struct tg3_napi *tnapi = dev_id;
7495 struct tg3 *tp = tnapi->tp;
7497 prefetch(tnapi->hw_status);
7499 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7501 * Writing any value to intr-mbox-0 clears PCI INTA# and
7502 * chip-internal interrupt pending events.
7503 * Writing non-zero to intr-mbox-0 additional tells the
7504 * NIC to stop sending us irqs, engaging "in-intr-handler"
7507 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7508 if (likely(!tg3_irq_sync(tp)))
7509 napi_schedule(&tnapi->napi);
7511 return IRQ_RETVAL(1);
7514 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7516 struct tg3_napi *tnapi = dev_id;
7517 struct tg3 *tp = tnapi->tp;
7518 struct tg3_hw_status *sblk = tnapi->hw_status;
7519 unsigned int handled = 1;
7521 /* In INTx mode, it is possible for the interrupt to arrive at
7522 * the CPU before the status block posted prior to the interrupt.
7523 * Reading the PCI State register will confirm whether the
7524 * interrupt is ours and will flush the status block.
7526 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7527 if (tg3_flag(tp, CHIP_RESETTING) ||
7528 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7535 * Writing any value to intr-mbox-0 clears PCI INTA# and
7536 * chip-internal interrupt pending events.
7537 * Writing non-zero to intr-mbox-0 additional tells the
7538 * NIC to stop sending us irqs, engaging "in-intr-handler"
7541 * Flush the mailbox to de-assert the IRQ immediately to prevent
7542 * spurious interrupts. The flush impacts performance but
7543 * excessive spurious interrupts can be worse in some cases.
7545 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7546 if (tg3_irq_sync(tp))
7548 sblk->status &= ~SD_STATUS_UPDATED;
7549 if (likely(tg3_has_work(tnapi))) {
7550 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7551 napi_schedule(&tnapi->napi);
7553 /* No work, shared interrupt perhaps? re-enable
7554 * interrupts, and flush that PCI write
7556 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7560 return IRQ_RETVAL(handled);
7563 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7565 struct tg3_napi *tnapi = dev_id;
7566 struct tg3 *tp = tnapi->tp;
7567 struct tg3_hw_status *sblk = tnapi->hw_status;
7568 unsigned int handled = 1;
7570 /* In INTx mode, it is possible for the interrupt to arrive at
7571 * the CPU before the status block posted prior to the interrupt.
7572 * Reading the PCI State register will confirm whether the
7573 * interrupt is ours and will flush the status block.
7575 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7576 if (tg3_flag(tp, CHIP_RESETTING) ||
7577 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7584 * writing any value to intr-mbox-0 clears PCI INTA# and
7585 * chip-internal interrupt pending events.
7586 * writing non-zero to intr-mbox-0 additional tells the
7587 * NIC to stop sending us irqs, engaging "in-intr-handler"
7590 * Flush the mailbox to de-assert the IRQ immediately to prevent
7591 * spurious interrupts. The flush impacts performance but
7592 * excessive spurious interrupts can be worse in some cases.
7594 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7597 * In a shared interrupt configuration, sometimes other devices'
7598 * interrupts will scream. We record the current status tag here
7599 * so that the above check can report that the screaming interrupts
7600 * are unhandled. Eventually they will be silenced.
7602 tnapi->last_irq_tag = sblk->status_tag;
7604 if (tg3_irq_sync(tp))
7607 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7609 napi_schedule(&tnapi->napi);
7612 return IRQ_RETVAL(handled);
7615 /* ISR for interrupt test */
7616 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7618 struct tg3_napi *tnapi = dev_id;
7619 struct tg3 *tp = tnapi->tp;
7620 struct tg3_hw_status *sblk = tnapi->hw_status;
7622 if ((sblk->status & SD_STATUS_UPDATED) ||
7623 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7624 tg3_disable_ints(tp);
7625 return IRQ_RETVAL(1);
7627 return IRQ_RETVAL(0);
7630 #ifdef CONFIG_NET_POLL_CONTROLLER
7631 static void tg3_poll_controller(struct net_device *dev)
7634 struct tg3 *tp = netdev_priv(dev);
7636 if (tg3_irq_sync(tp))
7639 for (i = 0; i < tp->irq_cnt; i++)
7640 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7644 static void tg3_tx_timeout(struct net_device *dev)
7646 struct tg3 *tp = netdev_priv(dev);
7648 if (netif_msg_tx_err(tp)) {
7649 netdev_err(dev, "transmit timed out, resetting\n");
7653 tg3_reset_task_schedule(tp);
7656 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7657 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7659 u32 base = (u32) mapping & 0xffffffff;
7661 return base + len + 8 < base;
7664 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7665 * of any 4GB boundaries: 4G, 8G, etc
7667 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7670 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7671 u32 base = (u32) mapping & 0xffffffff;
7673 return ((base + len + (mss & 0x3fff)) < base);
7678 /* Test for DMA addresses > 40-bit */
7679 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7682 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7683 if (tg3_flag(tp, 40BIT_DMA_BUG))
7684 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7691 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7692 dma_addr_t mapping, u32 len, u32 flags,
7695 txbd->addr_hi = ((u64) mapping >> 32);
7696 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7697 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7698 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7701 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7702 dma_addr_t map, u32 len, u32 flags,
7705 struct tg3 *tp = tnapi->tp;
7708 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7711 if (tg3_4g_overflow_test(map, len))
7714 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7717 if (tg3_40bit_overflow_test(tp, map, len))
7720 if (tp->dma_limit) {
7721 u32 prvidx = *entry;
7722 u32 tmp_flag = flags & ~TXD_FLAG_END;
7723 while (len > tp->dma_limit && *budget) {
7724 u32 frag_len = tp->dma_limit;
7725 len -= tp->dma_limit;
7727 /* Avoid the 8byte DMA problem */
7729 len += tp->dma_limit / 2;
7730 frag_len = tp->dma_limit / 2;
7733 tnapi->tx_buffers[*entry].fragmented = true;
7735 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7736 frag_len, tmp_flag, mss, vlan);
7739 *entry = NEXT_TX(*entry);
7746 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7747 len, flags, mss, vlan);
7749 *entry = NEXT_TX(*entry);
7752 tnapi->tx_buffers[prvidx].fragmented = false;
7756 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7757 len, flags, mss, vlan);
7758 *entry = NEXT_TX(*entry);
7764 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7767 struct sk_buff *skb;
7768 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7773 pci_unmap_single(tnapi->tp->pdev,
7774 dma_unmap_addr(txb, mapping),
7778 while (txb->fragmented) {
7779 txb->fragmented = false;
7780 entry = NEXT_TX(entry);
7781 txb = &tnapi->tx_buffers[entry];
7784 for (i = 0; i <= last; i++) {
7785 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7787 entry = NEXT_TX(entry);
7788 txb = &tnapi->tx_buffers[entry];
7790 pci_unmap_page(tnapi->tp->pdev,
7791 dma_unmap_addr(txb, mapping),
7792 skb_frag_size(frag), PCI_DMA_TODEVICE);
7794 while (txb->fragmented) {
7795 txb->fragmented = false;
7796 entry = NEXT_TX(entry);
7797 txb = &tnapi->tx_buffers[entry];
7802 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7803 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7804 struct sk_buff **pskb,
7805 u32 *entry, u32 *budget,
7806 u32 base_flags, u32 mss, u32 vlan)
7808 struct tg3 *tp = tnapi->tp;
7809 struct sk_buff *new_skb, *skb = *pskb;
7810 dma_addr_t new_addr = 0;
7813 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7814 new_skb = skb_copy(skb, GFP_ATOMIC);
7816 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7818 new_skb = skb_copy_expand(skb,
7819 skb_headroom(skb) + more_headroom,
7820 skb_tailroom(skb), GFP_ATOMIC);
7826 /* New SKB is guaranteed to be linear. */
7827 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7829 /* Make sure the mapping succeeded */
7830 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7831 dev_kfree_skb_any(new_skb);
7834 u32 save_entry = *entry;
7836 base_flags |= TXD_FLAG_END;
7838 tnapi->tx_buffers[*entry].skb = new_skb;
7839 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7842 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7843 new_skb->len, base_flags,
7845 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7846 dev_kfree_skb_any(new_skb);
7852 dev_consume_skb_any(skb);
7857 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7859 /* Check if we will never have enough descriptors,
7860 * as gso_segs can be more than current ring size
7862 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7865 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7867 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7868 * indicated in tg3_tx_frag_set()
7870 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7871 struct netdev_queue *txq, struct sk_buff *skb)
7873 struct sk_buff *segs, *nskb;
7874 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7876 /* Estimate the number of fragments in the worst case */
7877 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7878 netif_tx_stop_queue(txq);
7880 /* netif_tx_stop_queue() must be done before checking
7881 * checking tx index in tg3_tx_avail() below, because in
7882 * tg3_tx(), we update tx index before checking for
7883 * netif_tx_queue_stopped().
7886 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7887 return NETDEV_TX_BUSY;
7889 netif_tx_wake_queue(txq);
7892 segs = skb_gso_segment(skb, tp->dev->features &
7893 ~(NETIF_F_TSO | NETIF_F_TSO6));
7894 if (IS_ERR(segs) || !segs)
7895 goto tg3_tso_bug_end;
7901 tg3_start_xmit(nskb, tp->dev);
7905 dev_consume_skb_any(skb);
7907 return NETDEV_TX_OK;
7910 /* hard_start_xmit for all devices */
7911 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7913 struct tg3 *tp = netdev_priv(dev);
7914 u32 len, entry, base_flags, mss, vlan = 0;
7916 int i = -1, would_hit_hwbug;
7918 struct tg3_napi *tnapi;
7919 struct netdev_queue *txq;
7921 struct iphdr *iph = NULL;
7922 struct tcphdr *tcph = NULL;
7923 __sum16 tcp_csum = 0, ip_csum = 0;
7924 __be16 ip_tot_len = 0;
7926 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7927 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7928 if (tg3_flag(tp, ENABLE_TSS))
7931 budget = tg3_tx_avail(tnapi);
7933 /* We are running in BH disabled context with netif_tx_lock
7934 * and TX reclaim runs via tp->napi.poll inside of a software
7935 * interrupt. Furthermore, IRQ processing runs lockless so we have
7936 * no IRQ context deadlocks to worry about either. Rejoice!
7938 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7939 if (!netif_tx_queue_stopped(txq)) {
7940 netif_tx_stop_queue(txq);
7942 /* This is a hard error, log it. */
7944 "BUG! Tx Ring full when queue awake!\n");
7946 return NETDEV_TX_BUSY;
7949 entry = tnapi->tx_prod;
7952 mss = skb_shinfo(skb)->gso_size;
7954 u32 tcp_opt_len, hdr_len;
7956 if (skb_cow_head(skb, 0))
7960 tcp_opt_len = tcp_optlen(skb);
7962 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7964 /* HW/FW can not correctly segment packets that have been
7965 * vlan encapsulated.
7967 if (skb->protocol == htons(ETH_P_8021Q) ||
7968 skb->protocol == htons(ETH_P_8021AD)) {
7969 if (tg3_tso_bug_gso_check(tnapi, skb))
7970 return tg3_tso_bug(tp, tnapi, txq, skb);
7974 if (!skb_is_gso_v6(skb)) {
7975 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7976 tg3_flag(tp, TSO_BUG)) {
7977 if (tg3_tso_bug_gso_check(tnapi, skb))
7978 return tg3_tso_bug(tp, tnapi, txq, skb);
7981 ip_csum = iph->check;
7982 ip_tot_len = iph->tot_len;
7984 iph->tot_len = htons(mss + hdr_len);
7987 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7988 TXD_FLAG_CPU_POST_DMA);
7990 tcph = tcp_hdr(skb);
7991 tcp_csum = tcph->check;
7993 if (tg3_flag(tp, HW_TSO_1) ||
7994 tg3_flag(tp, HW_TSO_2) ||
7995 tg3_flag(tp, HW_TSO_3)) {
7997 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7999 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8003 if (tg3_flag(tp, HW_TSO_3)) {
8004 mss |= (hdr_len & 0xc) << 12;
8006 base_flags |= 0x00000010;
8007 base_flags |= (hdr_len & 0x3e0) << 5;
8008 } else if (tg3_flag(tp, HW_TSO_2))
8009 mss |= hdr_len << 9;
8010 else if (tg3_flag(tp, HW_TSO_1) ||
8011 tg3_asic_rev(tp) == ASIC_REV_5705) {
8012 if (tcp_opt_len || iph->ihl > 5) {
8015 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8016 mss |= (tsflags << 11);
8019 if (tcp_opt_len || iph->ihl > 5) {
8022 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8023 base_flags |= tsflags << 12;
8026 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8027 /* HW/FW can not correctly checksum packets that have been
8028 * vlan encapsulated.
8030 if (skb->protocol == htons(ETH_P_8021Q) ||
8031 skb->protocol == htons(ETH_P_8021AD)) {
8032 if (skb_checksum_help(skb))
8035 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8039 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8040 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8041 base_flags |= TXD_FLAG_JMB_PKT;
8043 if (skb_vlan_tag_present(skb)) {
8044 base_flags |= TXD_FLAG_VLAN;
8045 vlan = skb_vlan_tag_get(skb);
8048 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8049 tg3_flag(tp, TX_TSTAMP_EN)) {
8050 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8051 base_flags |= TXD_FLAG_HWTSTAMP;
8054 len = skb_headlen(skb);
8056 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8057 if (pci_dma_mapping_error(tp->pdev, mapping))
8061 tnapi->tx_buffers[entry].skb = skb;
8062 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8064 would_hit_hwbug = 0;
8066 if (tg3_flag(tp, 5701_DMA_BUG))
8067 would_hit_hwbug = 1;
8069 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8070 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8072 would_hit_hwbug = 1;
8073 } else if (skb_shinfo(skb)->nr_frags > 0) {
8076 if (!tg3_flag(tp, HW_TSO_1) &&
8077 !tg3_flag(tp, HW_TSO_2) &&
8078 !tg3_flag(tp, HW_TSO_3))
8081 /* Now loop through additional data
8082 * fragments, and queue them.
8084 last = skb_shinfo(skb)->nr_frags - 1;
8085 for (i = 0; i <= last; i++) {
8086 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8088 len = skb_frag_size(frag);
8089 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8090 len, DMA_TO_DEVICE);
8092 tnapi->tx_buffers[entry].skb = NULL;
8093 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8095 if (dma_mapping_error(&tp->pdev->dev, mapping))
8099 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8101 ((i == last) ? TXD_FLAG_END : 0),
8103 would_hit_hwbug = 1;
8109 if (would_hit_hwbug) {
8110 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8112 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8113 /* If it's a TSO packet, do GSO instead of
8114 * allocating and copying to a large linear SKB
8117 iph->check = ip_csum;
8118 iph->tot_len = ip_tot_len;
8120 tcph->check = tcp_csum;
8121 return tg3_tso_bug(tp, tnapi, txq, skb);
8124 /* If the workaround fails due to memory/mapping
8125 * failure, silently drop this packet.
8127 entry = tnapi->tx_prod;
8128 budget = tg3_tx_avail(tnapi);
8129 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8130 base_flags, mss, vlan))
8134 skb_tx_timestamp(skb);
8135 netdev_tx_sent_queue(txq, skb->len);
8137 /* Sync BD data before updating mailbox */
8140 tnapi->tx_prod = entry;
8141 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8142 netif_tx_stop_queue(txq);
8144 /* netif_tx_stop_queue() must be done before checking
8145 * checking tx index in tg3_tx_avail() below, because in
8146 * tg3_tx(), we update tx index before checking for
8147 * netif_tx_queue_stopped().
8150 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8151 netif_tx_wake_queue(txq);
8154 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8155 /* Packets are ready, update Tx producer idx on card. */
8156 tw32_tx_mbox(tnapi->prodmbox, entry);
8159 return NETDEV_TX_OK;
8162 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8163 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8165 dev_kfree_skb_any(skb);
8168 return NETDEV_TX_OK;
8171 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8174 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8175 MAC_MODE_PORT_MODE_MASK);
8177 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8179 if (!tg3_flag(tp, 5705_PLUS))
8180 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8182 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8183 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8185 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8187 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8189 if (tg3_flag(tp, 5705_PLUS) ||
8190 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8191 tg3_asic_rev(tp) == ASIC_REV_5700)
8192 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8195 tw32(MAC_MODE, tp->mac_mode);
8199 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8201 u32 val, bmcr, mac_mode, ptest = 0;
8203 tg3_phy_toggle_apd(tp, false);
8204 tg3_phy_toggle_automdix(tp, false);
8206 if (extlpbk && tg3_phy_set_extloopbk(tp))
8209 bmcr = BMCR_FULLDPLX;
8214 bmcr |= BMCR_SPEED100;
8218 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8220 bmcr |= BMCR_SPEED100;
8223 bmcr |= BMCR_SPEED1000;
8228 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8229 tg3_readphy(tp, MII_CTRL1000, &val);
8230 val |= CTL1000_AS_MASTER |
8231 CTL1000_ENABLE_MASTER;
8232 tg3_writephy(tp, MII_CTRL1000, val);
8234 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8235 MII_TG3_FET_PTEST_TRIM_2;
8236 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8239 bmcr |= BMCR_LOOPBACK;
8241 tg3_writephy(tp, MII_BMCR, bmcr);
8243 /* The write needs to be flushed for the FETs */
8244 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8245 tg3_readphy(tp, MII_BMCR, &bmcr);
8249 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8250 tg3_asic_rev(tp) == ASIC_REV_5785) {
8251 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8252 MII_TG3_FET_PTEST_FRC_TX_LINK |
8253 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8255 /* The write needs to be flushed for the AC131 */
8256 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8259 /* Reset to prevent losing 1st rx packet intermittently */
8260 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8261 tg3_flag(tp, 5780_CLASS)) {
8262 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8264 tw32_f(MAC_RX_MODE, tp->rx_mode);
8267 mac_mode = tp->mac_mode &
8268 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8269 if (speed == SPEED_1000)
8270 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8272 mac_mode |= MAC_MODE_PORT_MODE_MII;
8274 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8275 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8277 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8278 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8279 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8280 mac_mode |= MAC_MODE_LINK_POLARITY;
8282 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8283 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8286 tw32(MAC_MODE, mac_mode);
8292 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8294 struct tg3 *tp = netdev_priv(dev);
8296 if (features & NETIF_F_LOOPBACK) {
8297 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8300 spin_lock_bh(&tp->lock);
8301 tg3_mac_loopback(tp, true);
8302 netif_carrier_on(tp->dev);
8303 spin_unlock_bh(&tp->lock);
8304 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8306 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8309 spin_lock_bh(&tp->lock);
8310 tg3_mac_loopback(tp, false);
8311 /* Force link status check */
8312 tg3_setup_phy(tp, true);
8313 spin_unlock_bh(&tp->lock);
8314 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8318 static netdev_features_t tg3_fix_features(struct net_device *dev,
8319 netdev_features_t features)
8321 struct tg3 *tp = netdev_priv(dev);
8323 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8324 features &= ~NETIF_F_ALL_TSO;
8329 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8331 netdev_features_t changed = dev->features ^ features;
8333 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8334 tg3_set_loopback(dev, features);
8339 static void tg3_rx_prodring_free(struct tg3 *tp,
8340 struct tg3_rx_prodring_set *tpr)
8344 if (tpr != &tp->napi[0].prodring) {
8345 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8346 i = (i + 1) & tp->rx_std_ring_mask)
8347 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8350 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8351 for (i = tpr->rx_jmb_cons_idx;
8352 i != tpr->rx_jmb_prod_idx;
8353 i = (i + 1) & tp->rx_jmb_ring_mask) {
8354 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8362 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8363 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8366 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8367 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8368 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8373 /* Initialize rx rings for packet processing.
8375 * The chip has been shut down and the driver detached from
8376 * the networking, so no interrupts or new tx packets will
8377 * end up in the driver. tp->{tx,}lock are held and thus
8380 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8381 struct tg3_rx_prodring_set *tpr)
8383 u32 i, rx_pkt_dma_sz;
8385 tpr->rx_std_cons_idx = 0;
8386 tpr->rx_std_prod_idx = 0;
8387 tpr->rx_jmb_cons_idx = 0;
8388 tpr->rx_jmb_prod_idx = 0;
8390 if (tpr != &tp->napi[0].prodring) {
8391 memset(&tpr->rx_std_buffers[0], 0,
8392 TG3_RX_STD_BUFF_RING_SIZE(tp));
8393 if (tpr->rx_jmb_buffers)
8394 memset(&tpr->rx_jmb_buffers[0], 0,
8395 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8399 /* Zero out all descriptors. */
8400 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8402 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8403 if (tg3_flag(tp, 5780_CLASS) &&
8404 tp->dev->mtu > ETH_DATA_LEN)
8405 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8406 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8408 /* Initialize invariants of the rings, we only set this
8409 * stuff once. This works because the card does not
8410 * write into the rx buffer posting rings.
8412 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8413 struct tg3_rx_buffer_desc *rxd;
8415 rxd = &tpr->rx_std[i];
8416 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8417 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8418 rxd->opaque = (RXD_OPAQUE_RING_STD |
8419 (i << RXD_OPAQUE_INDEX_SHIFT));
8422 /* Now allocate fresh SKBs for each rx ring. */
8423 for (i = 0; i < tp->rx_pending; i++) {
8424 unsigned int frag_size;
8426 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8428 netdev_warn(tp->dev,
8429 "Using a smaller RX standard ring. Only "
8430 "%d out of %d buffers were allocated "
8431 "successfully\n", i, tp->rx_pending);
8439 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8442 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8444 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8447 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8448 struct tg3_rx_buffer_desc *rxd;
8450 rxd = &tpr->rx_jmb[i].std;
8451 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8452 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8454 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8455 (i << RXD_OPAQUE_INDEX_SHIFT));
8458 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8459 unsigned int frag_size;
8461 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8463 netdev_warn(tp->dev,
8464 "Using a smaller RX jumbo ring. Only %d "
8465 "out of %d buffers were allocated "
8466 "successfully\n", i, tp->rx_jumbo_pending);
8469 tp->rx_jumbo_pending = i;
8478 tg3_rx_prodring_free(tp, tpr);
8482 static void tg3_rx_prodring_fini(struct tg3 *tp,
8483 struct tg3_rx_prodring_set *tpr)
8485 kfree(tpr->rx_std_buffers);
8486 tpr->rx_std_buffers = NULL;
8487 kfree(tpr->rx_jmb_buffers);
8488 tpr->rx_jmb_buffers = NULL;
8490 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8491 tpr->rx_std, tpr->rx_std_mapping);
8495 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8496 tpr->rx_jmb, tpr->rx_jmb_mapping);
8501 static int tg3_rx_prodring_init(struct tg3 *tp,
8502 struct tg3_rx_prodring_set *tpr)
8504 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8506 if (!tpr->rx_std_buffers)
8509 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8510 TG3_RX_STD_RING_BYTES(tp),
8511 &tpr->rx_std_mapping,
8516 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8517 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8519 if (!tpr->rx_jmb_buffers)
8522 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8523 TG3_RX_JMB_RING_BYTES(tp),
8524 &tpr->rx_jmb_mapping,
8533 tg3_rx_prodring_fini(tp, tpr);
8537 /* Free up pending packets in all rx/tx rings.
8539 * The chip has been shut down and the driver detached from
8540 * the networking, so no interrupts or new tx packets will
8541 * end up in the driver. tp->{tx,}lock is not held and we are not
8542 * in an interrupt context and thus may sleep.
8544 static void tg3_free_rings(struct tg3 *tp)
8548 for (j = 0; j < tp->irq_cnt; j++) {
8549 struct tg3_napi *tnapi = &tp->napi[j];
8551 tg3_rx_prodring_free(tp, &tnapi->prodring);
8553 if (!tnapi->tx_buffers)
8556 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8557 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8562 tg3_tx_skb_unmap(tnapi, i,
8563 skb_shinfo(skb)->nr_frags - 1);
8565 dev_consume_skb_any(skb);
8567 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8571 /* Initialize tx/rx rings for packet processing.
8573 * The chip has been shut down and the driver detached from
8574 * the networking, so no interrupts or new tx packets will
8575 * end up in the driver. tp->{tx,}lock are held and thus
8578 static int tg3_init_rings(struct tg3 *tp)
8582 /* Free up all the SKBs. */
8585 for (i = 0; i < tp->irq_cnt; i++) {
8586 struct tg3_napi *tnapi = &tp->napi[i];
8588 tnapi->last_tag = 0;
8589 tnapi->last_irq_tag = 0;
8590 tnapi->hw_status->status = 0;
8591 tnapi->hw_status->status_tag = 0;
8592 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8597 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8599 tnapi->rx_rcb_ptr = 0;
8601 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8603 if (tnapi->prodring.rx_std &&
8604 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8613 static void tg3_mem_tx_release(struct tg3 *tp)
8617 for (i = 0; i < tp->irq_max; i++) {
8618 struct tg3_napi *tnapi = &tp->napi[i];
8620 if (tnapi->tx_ring) {
8621 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8622 tnapi->tx_ring, tnapi->tx_desc_mapping);
8623 tnapi->tx_ring = NULL;
8626 kfree(tnapi->tx_buffers);
8627 tnapi->tx_buffers = NULL;
8631 static int tg3_mem_tx_acquire(struct tg3 *tp)
8634 struct tg3_napi *tnapi = &tp->napi[0];
8636 /* If multivector TSS is enabled, vector 0 does not handle
8637 * tx interrupts. Don't allocate any resources for it.
8639 if (tg3_flag(tp, ENABLE_TSS))
8642 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8643 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8644 sizeof(struct tg3_tx_ring_info),
8646 if (!tnapi->tx_buffers)
8649 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8651 &tnapi->tx_desc_mapping,
8653 if (!tnapi->tx_ring)
8660 tg3_mem_tx_release(tp);
8664 static void tg3_mem_rx_release(struct tg3 *tp)
8668 for (i = 0; i < tp->irq_max; i++) {
8669 struct tg3_napi *tnapi = &tp->napi[i];
8671 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8676 dma_free_coherent(&tp->pdev->dev,
8677 TG3_RX_RCB_RING_BYTES(tp),
8679 tnapi->rx_rcb_mapping);
8680 tnapi->rx_rcb = NULL;
8684 static int tg3_mem_rx_acquire(struct tg3 *tp)
8686 unsigned int i, limit;
8688 limit = tp->rxq_cnt;
8690 /* If RSS is enabled, we need a (dummy) producer ring
8691 * set on vector zero. This is the true hw prodring.
8693 if (tg3_flag(tp, ENABLE_RSS))
8696 for (i = 0; i < limit; i++) {
8697 struct tg3_napi *tnapi = &tp->napi[i];
8699 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8702 /* If multivector RSS is enabled, vector 0
8703 * does not handle rx or tx interrupts.
8704 * Don't allocate any resources for it.
8706 if (!i && tg3_flag(tp, ENABLE_RSS))
8709 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8710 TG3_RX_RCB_RING_BYTES(tp),
8711 &tnapi->rx_rcb_mapping,
8720 tg3_mem_rx_release(tp);
8725 * Must not be invoked with interrupt sources disabled and
8726 * the hardware shutdown down.
8728 static void tg3_free_consistent(struct tg3 *tp)
8732 for (i = 0; i < tp->irq_cnt; i++) {
8733 struct tg3_napi *tnapi = &tp->napi[i];
8735 if (tnapi->hw_status) {
8736 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8738 tnapi->status_mapping);
8739 tnapi->hw_status = NULL;
8743 tg3_mem_rx_release(tp);
8744 tg3_mem_tx_release(tp);
8746 /* tp->hw_stats can be referenced safely:
8747 * 1. under rtnl_lock
8748 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8751 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8752 tp->hw_stats, tp->stats_mapping);
8753 tp->hw_stats = NULL;
8758 * Must not be invoked with interrupt sources disabled and
8759 * the hardware shutdown down. Can sleep.
8761 static int tg3_alloc_consistent(struct tg3 *tp)
8765 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8766 sizeof(struct tg3_hw_stats),
8767 &tp->stats_mapping, GFP_KERNEL);
8771 for (i = 0; i < tp->irq_cnt; i++) {
8772 struct tg3_napi *tnapi = &tp->napi[i];
8773 struct tg3_hw_status *sblk;
8775 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8777 &tnapi->status_mapping,
8779 if (!tnapi->hw_status)
8782 sblk = tnapi->hw_status;
8784 if (tg3_flag(tp, ENABLE_RSS)) {
8785 u16 *prodptr = NULL;
8788 * When RSS is enabled, the status block format changes
8789 * slightly. The "rx_jumbo_consumer", "reserved",
8790 * and "rx_mini_consumer" members get mapped to the
8791 * other three rx return ring producer indexes.
8795 prodptr = &sblk->idx[0].rx_producer;
8798 prodptr = &sblk->rx_jumbo_consumer;
8801 prodptr = &sblk->reserved;
8804 prodptr = &sblk->rx_mini_consumer;
8807 tnapi->rx_rcb_prod_idx = prodptr;
8809 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8813 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8819 tg3_free_consistent(tp);
8823 #define MAX_WAIT_CNT 1000
8825 /* To stop a block, clear the enable bit and poll till it
8826 * clears. tp->lock is held.
8828 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8833 if (tg3_flag(tp, 5705_PLUS)) {
8840 /* We can't enable/disable these bits of the
8841 * 5705/5750, just say success.
8854 for (i = 0; i < MAX_WAIT_CNT; i++) {
8855 if (pci_channel_offline(tp->pdev)) {
8856 dev_err(&tp->pdev->dev,
8857 "tg3_stop_block device offline, "
8858 "ofs=%lx enable_bit=%x\n",
8865 if ((val & enable_bit) == 0)
8869 if (i == MAX_WAIT_CNT && !silent) {
8870 dev_err(&tp->pdev->dev,
8871 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8879 /* tp->lock is held. */
8880 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8884 tg3_disable_ints(tp);
8886 if (pci_channel_offline(tp->pdev)) {
8887 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8888 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8893 tp->rx_mode &= ~RX_MODE_ENABLE;
8894 tw32_f(MAC_RX_MODE, tp->rx_mode);
8897 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8898 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8899 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8900 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8901 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8902 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8904 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8905 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8906 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8907 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8908 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8909 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8910 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8912 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8913 tw32_f(MAC_MODE, tp->mac_mode);
8916 tp->tx_mode &= ~TX_MODE_ENABLE;
8917 tw32_f(MAC_TX_MODE, tp->tx_mode);
8919 for (i = 0; i < MAX_WAIT_CNT; i++) {
8921 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8924 if (i >= MAX_WAIT_CNT) {
8925 dev_err(&tp->pdev->dev,
8926 "%s timed out, TX_MODE_ENABLE will not clear "
8927 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8931 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8932 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8933 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8935 tw32(FTQ_RESET, 0xffffffff);
8936 tw32(FTQ_RESET, 0x00000000);
8938 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8939 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8942 for (i = 0; i < tp->irq_cnt; i++) {
8943 struct tg3_napi *tnapi = &tp->napi[i];
8944 if (tnapi->hw_status)
8945 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8951 /* Save PCI command register before chip reset */
8952 static void tg3_save_pci_state(struct tg3 *tp)
8954 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8957 /* Restore PCI state after chip reset */
8958 static void tg3_restore_pci_state(struct tg3 *tp)
8962 /* Re-enable indirect register accesses. */
8963 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8964 tp->misc_host_ctrl);
8966 /* Set MAX PCI retry to zero. */
8967 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8968 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8969 tg3_flag(tp, PCIX_MODE))
8970 val |= PCISTATE_RETRY_SAME_DMA;
8971 /* Allow reads and writes to the APE register and memory space. */
8972 if (tg3_flag(tp, ENABLE_APE))
8973 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8974 PCISTATE_ALLOW_APE_SHMEM_WR |
8975 PCISTATE_ALLOW_APE_PSPACE_WR;
8976 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8978 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8980 if (!tg3_flag(tp, PCI_EXPRESS)) {
8981 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8982 tp->pci_cacheline_sz);
8983 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8987 /* Make sure PCI-X relaxed ordering bit is clear. */
8988 if (tg3_flag(tp, PCIX_MODE)) {
8991 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8993 pcix_cmd &= ~PCI_X_CMD_ERO;
8994 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8998 if (tg3_flag(tp, 5780_CLASS)) {
9000 /* Chip reset on 5780 will reset MSI enable bit,
9001 * so need to restore it.
9003 if (tg3_flag(tp, USING_MSI)) {
9006 pci_read_config_word(tp->pdev,
9007 tp->msi_cap + PCI_MSI_FLAGS,
9009 pci_write_config_word(tp->pdev,
9010 tp->msi_cap + PCI_MSI_FLAGS,
9011 ctrl | PCI_MSI_FLAGS_ENABLE);
9012 val = tr32(MSGINT_MODE);
9013 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9018 static void tg3_override_clk(struct tg3 *tp)
9022 switch (tg3_asic_rev(tp)) {
9024 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9025 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9026 TG3_CPMU_MAC_ORIDE_ENABLE);
9031 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9039 static void tg3_restore_clk(struct tg3 *tp)
9043 switch (tg3_asic_rev(tp)) {
9045 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9046 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9047 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9052 val = tr32(TG3_CPMU_CLCK_ORIDE);
9053 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9061 /* tp->lock is held. */
9062 static int tg3_chip_reset(struct tg3 *tp)
9063 __releases(tp->lock)
9064 __acquires(tp->lock)
9067 void (*write_op)(struct tg3 *, u32, u32);
9070 if (!pci_device_is_present(tp->pdev))
9075 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9077 /* No matching tg3_nvram_unlock() after this because
9078 * chip reset below will undo the nvram lock.
9080 tp->nvram_lock_cnt = 0;
9082 /* GRC_MISC_CFG core clock reset will clear the memory
9083 * enable bit in PCI register 4 and the MSI enable bit
9084 * on some chips, so we save relevant registers here.
9086 tg3_save_pci_state(tp);
9088 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9089 tg3_flag(tp, 5755_PLUS))
9090 tw32(GRC_FASTBOOT_PC, 0);
9093 * We must avoid the readl() that normally takes place.
9094 * It locks machines, causes machine checks, and other
9095 * fun things. So, temporarily disable the 5701
9096 * hardware workaround, while we do the reset.
9098 write_op = tp->write32;
9099 if (write_op == tg3_write_flush_reg32)
9100 tp->write32 = tg3_write32;
9102 /* Prevent the irq handler from reading or writing PCI registers
9103 * during chip reset when the memory enable bit in the PCI command
9104 * register may be cleared. The chip does not generate interrupt
9105 * at this time, but the irq handler may still be called due to irq
9106 * sharing or irqpoll.
9108 tg3_flag_set(tp, CHIP_RESETTING);
9109 for (i = 0; i < tp->irq_cnt; i++) {
9110 struct tg3_napi *tnapi = &tp->napi[i];
9111 if (tnapi->hw_status) {
9112 tnapi->hw_status->status = 0;
9113 tnapi->hw_status->status_tag = 0;
9115 tnapi->last_tag = 0;
9116 tnapi->last_irq_tag = 0;
9120 tg3_full_unlock(tp);
9122 for (i = 0; i < tp->irq_cnt; i++)
9123 synchronize_irq(tp->napi[i].irq_vec);
9125 tg3_full_lock(tp, 0);
9127 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9128 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9129 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9133 val = GRC_MISC_CFG_CORECLK_RESET;
9135 if (tg3_flag(tp, PCI_EXPRESS)) {
9136 /* Force PCIe 1.0a mode */
9137 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9138 !tg3_flag(tp, 57765_PLUS) &&
9139 tr32(TG3_PCIE_PHY_TSTCTL) ==
9140 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9141 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9143 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9144 tw32(GRC_MISC_CFG, (1 << 29));
9149 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9150 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9151 tw32(GRC_VCPU_EXT_CTRL,
9152 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9155 /* Set the clock to the highest frequency to avoid timeouts. With link
9156 * aware mode, the clock speed could be slow and bootcode does not
9157 * complete within the expected time. Override the clock to allow the
9158 * bootcode to finish sooner and then restore it.
9160 tg3_override_clk(tp);
9162 /* Manage gphy power for all CPMU absent PCIe devices. */
9163 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9164 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9166 tw32(GRC_MISC_CFG, val);
9168 /* restore 5701 hardware bug workaround write method */
9169 tp->write32 = write_op;
9171 /* Unfortunately, we have to delay before the PCI read back.
9172 * Some 575X chips even will not respond to a PCI cfg access
9173 * when the reset command is given to the chip.
9175 * How do these hardware designers expect things to work
9176 * properly if the PCI write is posted for a long period
9177 * of time? It is always necessary to have some method by
9178 * which a register read back can occur to push the write
9179 * out which does the reset.
9181 * For most tg3 variants the trick below was working.
9186 /* Flush PCI posted writes. The normal MMIO registers
9187 * are inaccessible at this time so this is the only
9188 * way to make this reliably (actually, this is no longer
9189 * the case, see above). I tried to use indirect
9190 * register read/write but this upset some 5701 variants.
9192 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9196 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9199 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9203 /* Wait for link training to complete. */
9204 for (j = 0; j < 5000; j++)
9207 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9208 pci_write_config_dword(tp->pdev, 0xc4,
9209 cfg_val | (1 << 15));
9212 /* Clear the "no snoop" and "relaxed ordering" bits. */
9213 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9215 * Older PCIe devices only support the 128 byte
9216 * MPS setting. Enforce the restriction.
9218 if (!tg3_flag(tp, CPMU_PRESENT))
9219 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9220 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9222 /* Clear error status */
9223 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9224 PCI_EXP_DEVSTA_CED |
9225 PCI_EXP_DEVSTA_NFED |
9226 PCI_EXP_DEVSTA_FED |
9227 PCI_EXP_DEVSTA_URD);
9230 tg3_restore_pci_state(tp);
9232 tg3_flag_clear(tp, CHIP_RESETTING);
9233 tg3_flag_clear(tp, ERROR_PROCESSED);
9236 if (tg3_flag(tp, 5780_CLASS))
9237 val = tr32(MEMARB_MODE);
9238 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9240 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9242 tw32(0x5000, 0x400);
9245 if (tg3_flag(tp, IS_SSB_CORE)) {
9247 * BCM4785: In order to avoid repercussions from using
9248 * potentially defective internal ROM, stop the Rx RISC CPU,
9249 * which is not required.
9252 tg3_halt_cpu(tp, RX_CPU_BASE);
9255 err = tg3_poll_fw(tp);
9259 tw32(GRC_MODE, tp->grc_mode);
9261 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9264 tw32(0xc4, val | (1 << 15));
9267 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9268 tg3_asic_rev(tp) == ASIC_REV_5705) {
9269 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9270 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9271 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9272 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9275 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9276 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9278 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9279 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9284 tw32_f(MAC_MODE, val);
9287 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9291 if (tg3_flag(tp, PCI_EXPRESS) &&
9292 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9293 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9294 !tg3_flag(tp, 57765_PLUS)) {
9297 tw32(0x7c00, val | (1 << 25));
9300 tg3_restore_clk(tp);
9302 /* Increase the core clock speed to fix tx timeout issue for 5762
9303 * with 100Mbps link speed.
9305 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9306 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9307 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9308 TG3_CPMU_MAC_ORIDE_ENABLE);
9311 /* Reprobe ASF enable state. */
9312 tg3_flag_clear(tp, ENABLE_ASF);
9313 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9314 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9316 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9317 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9318 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9321 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9322 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9323 tg3_flag_set(tp, ENABLE_ASF);
9324 tp->last_event_jiffies = jiffies;
9325 if (tg3_flag(tp, 5750_PLUS))
9326 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9328 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9329 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9330 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9331 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9332 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9339 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9340 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9341 static void __tg3_set_rx_mode(struct net_device *);
9343 /* tp->lock is held. */
9344 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9350 tg3_write_sig_pre_reset(tp, kind);
9352 tg3_abort_hw(tp, silent);
9353 err = tg3_chip_reset(tp);
9355 __tg3_set_mac_addr(tp, false);
9357 tg3_write_sig_legacy(tp, kind);
9358 tg3_write_sig_post_reset(tp, kind);
9361 /* Save the stats across chip resets... */
9362 tg3_get_nstats(tp, &tp->net_stats_prev);
9363 tg3_get_estats(tp, &tp->estats_prev);
9365 /* And make sure the next sample is new data */
9366 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9372 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9374 struct tg3 *tp = netdev_priv(dev);
9375 struct sockaddr *addr = p;
9377 bool skip_mac_1 = false;
9379 if (!is_valid_ether_addr(addr->sa_data))
9380 return -EADDRNOTAVAIL;
9382 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9384 if (!netif_running(dev))
9387 if (tg3_flag(tp, ENABLE_ASF)) {
9388 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9390 addr0_high = tr32(MAC_ADDR_0_HIGH);
9391 addr0_low = tr32(MAC_ADDR_0_LOW);
9392 addr1_high = tr32(MAC_ADDR_1_HIGH);
9393 addr1_low = tr32(MAC_ADDR_1_LOW);
9395 /* Skip MAC addr 1 if ASF is using it. */
9396 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9397 !(addr1_high == 0 && addr1_low == 0))
9400 spin_lock_bh(&tp->lock);
9401 __tg3_set_mac_addr(tp, skip_mac_1);
9402 __tg3_set_rx_mode(dev);
9403 spin_unlock_bh(&tp->lock);
9408 /* tp->lock is held. */
9409 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9410 dma_addr_t mapping, u32 maxlen_flags,
9414 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9415 ((u64) mapping >> 32));
9417 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9418 ((u64) mapping & 0xffffffff));
9420 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9423 if (!tg3_flag(tp, 5705_PLUS))
9425 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9430 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9434 if (!tg3_flag(tp, ENABLE_TSS)) {
9435 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9436 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9437 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9439 tw32(HOSTCC_TXCOL_TICKS, 0);
9440 tw32(HOSTCC_TXMAX_FRAMES, 0);
9441 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9443 for (; i < tp->txq_cnt; i++) {
9446 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9447 tw32(reg, ec->tx_coalesce_usecs);
9448 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9449 tw32(reg, ec->tx_max_coalesced_frames);
9450 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9451 tw32(reg, ec->tx_max_coalesced_frames_irq);
9455 for (; i < tp->irq_max - 1; i++) {
9456 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9457 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9458 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9462 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9465 u32 limit = tp->rxq_cnt;
9467 if (!tg3_flag(tp, ENABLE_RSS)) {
9468 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9469 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9470 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9473 tw32(HOSTCC_RXCOL_TICKS, 0);
9474 tw32(HOSTCC_RXMAX_FRAMES, 0);
9475 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9478 for (; i < limit; i++) {
9481 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9482 tw32(reg, ec->rx_coalesce_usecs);
9483 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9484 tw32(reg, ec->rx_max_coalesced_frames);
9485 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9486 tw32(reg, ec->rx_max_coalesced_frames_irq);
9489 for (; i < tp->irq_max - 1; i++) {
9490 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9491 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9492 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9496 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9498 tg3_coal_tx_init(tp, ec);
9499 tg3_coal_rx_init(tp, ec);
9501 if (!tg3_flag(tp, 5705_PLUS)) {
9502 u32 val = ec->stats_block_coalesce_usecs;
9504 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9505 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9510 tw32(HOSTCC_STAT_COAL_TICKS, val);
9514 /* tp->lock is held. */
9515 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9519 /* Disable all transmit rings but the first. */
9520 if (!tg3_flag(tp, 5705_PLUS))
9521 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9522 else if (tg3_flag(tp, 5717_PLUS))
9523 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9524 else if (tg3_flag(tp, 57765_CLASS) ||
9525 tg3_asic_rev(tp) == ASIC_REV_5762)
9526 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9528 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9530 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9531 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9532 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9533 BDINFO_FLAGS_DISABLED);
9536 /* tp->lock is held. */
9537 static void tg3_tx_rcbs_init(struct tg3 *tp)
9540 u32 txrcb = NIC_SRAM_SEND_RCB;
9542 if (tg3_flag(tp, ENABLE_TSS))
9545 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9546 struct tg3_napi *tnapi = &tp->napi[i];
9548 if (!tnapi->tx_ring)
9551 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9552 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9553 NIC_SRAM_TX_BUFFER_DESC);
9557 /* tp->lock is held. */
9558 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9562 /* Disable all receive return rings but the first. */
9563 if (tg3_flag(tp, 5717_PLUS))
9564 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9565 else if (!tg3_flag(tp, 5705_PLUS))
9566 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9567 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9568 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9569 tg3_flag(tp, 57765_CLASS))
9570 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9572 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9574 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9575 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9576 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9577 BDINFO_FLAGS_DISABLED);
9580 /* tp->lock is held. */
9581 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9584 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9586 if (tg3_flag(tp, ENABLE_RSS))
9589 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9590 struct tg3_napi *tnapi = &tp->napi[i];
9595 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9596 (tp->rx_ret_ring_mask + 1) <<
9597 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9601 /* tp->lock is held. */
9602 static void tg3_rings_reset(struct tg3 *tp)
9606 struct tg3_napi *tnapi = &tp->napi[0];
9608 tg3_tx_rcbs_disable(tp);
9610 tg3_rx_ret_rcbs_disable(tp);
9612 /* Disable interrupts */
9613 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9614 tp->napi[0].chk_msi_cnt = 0;
9615 tp->napi[0].last_rx_cons = 0;
9616 tp->napi[0].last_tx_cons = 0;
9618 /* Zero mailbox registers. */
9619 if (tg3_flag(tp, SUPPORT_MSIX)) {
9620 for (i = 1; i < tp->irq_max; i++) {
9621 tp->napi[i].tx_prod = 0;
9622 tp->napi[i].tx_cons = 0;
9623 if (tg3_flag(tp, ENABLE_TSS))
9624 tw32_mailbox(tp->napi[i].prodmbox, 0);
9625 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9626 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9627 tp->napi[i].chk_msi_cnt = 0;
9628 tp->napi[i].last_rx_cons = 0;
9629 tp->napi[i].last_tx_cons = 0;
9631 if (!tg3_flag(tp, ENABLE_TSS))
9632 tw32_mailbox(tp->napi[0].prodmbox, 0);
9634 tp->napi[0].tx_prod = 0;
9635 tp->napi[0].tx_cons = 0;
9636 tw32_mailbox(tp->napi[0].prodmbox, 0);
9637 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9640 /* Make sure the NIC-based send BD rings are disabled. */
9641 if (!tg3_flag(tp, 5705_PLUS)) {
9642 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9643 for (i = 0; i < 16; i++)
9644 tw32_tx_mbox(mbox + i * 8, 0);
9647 /* Clear status block in ram. */
9648 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9650 /* Set status block DMA address */
9651 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9652 ((u64) tnapi->status_mapping >> 32));
9653 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9654 ((u64) tnapi->status_mapping & 0xffffffff));
9656 stblk = HOSTCC_STATBLCK_RING1;
9658 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9659 u64 mapping = (u64)tnapi->status_mapping;
9660 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9661 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9664 /* Clear status block in ram. */
9665 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9668 tg3_tx_rcbs_init(tp);
9669 tg3_rx_ret_rcbs_init(tp);
9672 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9674 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9676 if (!tg3_flag(tp, 5750_PLUS) ||
9677 tg3_flag(tp, 5780_CLASS) ||
9678 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9679 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9680 tg3_flag(tp, 57765_PLUS))
9681 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9682 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9683 tg3_asic_rev(tp) == ASIC_REV_5787)
9684 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9686 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9688 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9689 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9691 val = min(nic_rep_thresh, host_rep_thresh);
9692 tw32(RCVBDI_STD_THRESH, val);
9694 if (tg3_flag(tp, 57765_PLUS))
9695 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9697 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9700 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9702 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9704 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9705 tw32(RCVBDI_JUMBO_THRESH, val);
9707 if (tg3_flag(tp, 57765_PLUS))
9708 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9711 static inline u32 calc_crc(unsigned char *buf, int len)
9719 for (j = 0; j < len; j++) {
9722 for (k = 0; k < 8; k++) {
9728 reg ^= CRC32_POLY_LE;
9735 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9737 /* accept or reject all multicast frames */
9738 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9739 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9740 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9741 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9744 static void __tg3_set_rx_mode(struct net_device *dev)
9746 struct tg3 *tp = netdev_priv(dev);
9749 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9750 RX_MODE_KEEP_VLAN_TAG);
9752 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9753 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9756 if (!tg3_flag(tp, ENABLE_ASF))
9757 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9760 if (dev->flags & IFF_PROMISC) {
9761 /* Promiscuous mode. */
9762 rx_mode |= RX_MODE_PROMISC;
9763 } else if (dev->flags & IFF_ALLMULTI) {
9764 /* Accept all multicast. */
9765 tg3_set_multi(tp, 1);
9766 } else if (netdev_mc_empty(dev)) {
9767 /* Reject all multicast. */
9768 tg3_set_multi(tp, 0);
9770 /* Accept one or more multicast(s). */
9771 struct netdev_hw_addr *ha;
9772 u32 mc_filter[4] = { 0, };
9777 netdev_for_each_mc_addr(ha, dev) {
9778 crc = calc_crc(ha->addr, ETH_ALEN);
9780 regidx = (bit & 0x60) >> 5;
9782 mc_filter[regidx] |= (1 << bit);
9785 tw32(MAC_HASH_REG_0, mc_filter[0]);
9786 tw32(MAC_HASH_REG_1, mc_filter[1]);
9787 tw32(MAC_HASH_REG_2, mc_filter[2]);
9788 tw32(MAC_HASH_REG_3, mc_filter[3]);
9791 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9792 rx_mode |= RX_MODE_PROMISC;
9793 } else if (!(dev->flags & IFF_PROMISC)) {
9794 /* Add all entries into to the mac addr filter list */
9796 struct netdev_hw_addr *ha;
9798 netdev_for_each_uc_addr(ha, dev) {
9799 __tg3_set_one_mac_addr(tp, ha->addr,
9800 i + TG3_UCAST_ADDR_IDX(tp));
9805 if (rx_mode != tp->rx_mode) {
9806 tp->rx_mode = rx_mode;
9807 tw32_f(MAC_RX_MODE, rx_mode);
9812 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9816 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9817 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9820 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9824 if (!tg3_flag(tp, SUPPORT_MSIX))
9827 if (tp->rxq_cnt == 1) {
9828 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9832 /* Validate table against current IRQ count */
9833 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9834 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9838 if (i != TG3_RSS_INDIR_TBL_SIZE)
9839 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9842 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9845 u32 reg = MAC_RSS_INDIR_TBL_0;
9847 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9848 u32 val = tp->rss_ind_tbl[i];
9850 for (; i % 8; i++) {
9852 val |= tp->rss_ind_tbl[i];
9859 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9861 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9862 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9864 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9867 /* tp->lock is held. */
9868 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9870 u32 val, rdmac_mode;
9872 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9874 tg3_disable_ints(tp);
9878 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9880 if (tg3_flag(tp, INIT_COMPLETE))
9881 tg3_abort_hw(tp, 1);
9883 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9884 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9885 tg3_phy_pull_config(tp);
9886 tg3_eee_pull_config(tp, NULL);
9887 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9890 /* Enable MAC control of LPI */
9891 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9897 err = tg3_chip_reset(tp);
9901 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9903 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9904 val = tr32(TG3_CPMU_CTRL);
9905 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9906 tw32(TG3_CPMU_CTRL, val);
9908 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9909 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9910 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9911 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9913 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9914 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9915 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9916 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9918 val = tr32(TG3_CPMU_HST_ACC);
9919 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9920 val |= CPMU_HST_ACC_MACCLK_6_25;
9921 tw32(TG3_CPMU_HST_ACC, val);
9924 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9925 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9926 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9927 PCIE_PWR_MGMT_L1_THRESH_4MS;
9928 tw32(PCIE_PWR_MGMT_THRESH, val);
9930 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9931 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9933 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9935 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9936 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9939 if (tg3_flag(tp, L1PLLPD_EN)) {
9940 u32 grc_mode = tr32(GRC_MODE);
9942 /* Access the lower 1K of PL PCIE block registers. */
9943 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9944 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9946 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9947 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9948 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9950 tw32(GRC_MODE, grc_mode);
9953 if (tg3_flag(tp, 57765_CLASS)) {
9954 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9955 u32 grc_mode = tr32(GRC_MODE);
9957 /* Access the lower 1K of PL PCIE block registers. */
9958 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9959 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9961 val = tr32(TG3_PCIE_TLDLPL_PORT +
9962 TG3_PCIE_PL_LO_PHYCTL5);
9963 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9964 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9966 tw32(GRC_MODE, grc_mode);
9969 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9972 /* Fix transmit hangs */
9973 val = tr32(TG3_CPMU_PADRNG_CTL);
9974 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9975 tw32(TG3_CPMU_PADRNG_CTL, val);
9977 grc_mode = tr32(GRC_MODE);
9979 /* Access the lower 1K of DL PCIE block registers. */
9980 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9981 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9983 val = tr32(TG3_PCIE_TLDLPL_PORT +
9984 TG3_PCIE_DL_LO_FTSMAX);
9985 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9986 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9987 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9989 tw32(GRC_MODE, grc_mode);
9992 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9993 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9994 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9995 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9998 /* This works around an issue with Athlon chipsets on
9999 * B3 tigon3 silicon. This bit has no effect on any
10000 * other revision. But do not set this on PCI Express
10001 * chips and don't even touch the clocks if the CPMU is present.
10003 if (!tg3_flag(tp, CPMU_PRESENT)) {
10004 if (!tg3_flag(tp, PCI_EXPRESS))
10005 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10006 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10009 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10010 tg3_flag(tp, PCIX_MODE)) {
10011 val = tr32(TG3PCI_PCISTATE);
10012 val |= PCISTATE_RETRY_SAME_DMA;
10013 tw32(TG3PCI_PCISTATE, val);
10016 if (tg3_flag(tp, ENABLE_APE)) {
10017 /* Allow reads and writes to the
10018 * APE register and memory space.
10020 val = tr32(TG3PCI_PCISTATE);
10021 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10022 PCISTATE_ALLOW_APE_SHMEM_WR |
10023 PCISTATE_ALLOW_APE_PSPACE_WR;
10024 tw32(TG3PCI_PCISTATE, val);
10027 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10028 /* Enable some hw fixes. */
10029 val = tr32(TG3PCI_MSI_DATA);
10030 val |= (1 << 26) | (1 << 28) | (1 << 29);
10031 tw32(TG3PCI_MSI_DATA, val);
10034 /* Descriptor ring init may make accesses to the
10035 * NIC SRAM area to setup the TX descriptors, so we
10036 * can only do this after the hardware has been
10037 * successfully reset.
10039 err = tg3_init_rings(tp);
10043 if (tg3_flag(tp, 57765_PLUS)) {
10044 val = tr32(TG3PCI_DMA_RW_CTRL) &
10045 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10046 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10047 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10048 if (!tg3_flag(tp, 57765_CLASS) &&
10049 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10050 tg3_asic_rev(tp) != ASIC_REV_5762)
10051 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10052 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10053 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10054 tg3_asic_rev(tp) != ASIC_REV_5761) {
10055 /* This value is determined during the probe time DMA
10056 * engine test, tg3_test_dma.
10058 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10061 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10062 GRC_MODE_4X_NIC_SEND_RINGS |
10063 GRC_MODE_NO_TX_PHDR_CSUM |
10064 GRC_MODE_NO_RX_PHDR_CSUM);
10065 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10067 /* Pseudo-header checksum is done by hardware logic and not
10068 * the offload processers, so make the chip do the pseudo-
10069 * header checksums on receive. For transmit it is more
10070 * convenient to do the pseudo-header checksum in software
10071 * as Linux does that on transmit for us in all cases.
10073 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10075 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10077 tw32(TG3_RX_PTP_CTL,
10078 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10080 if (tg3_flag(tp, PTP_CAPABLE))
10081 val |= GRC_MODE_TIME_SYNC_ENABLE;
10083 tw32(GRC_MODE, tp->grc_mode | val);
10085 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10086 * south bridge limitation. As a workaround, Driver is setting MRRS
10087 * to 2048 instead of default 4096.
10089 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10090 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10091 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10092 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10095 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10096 val = tr32(GRC_MISC_CFG);
10098 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10099 tw32(GRC_MISC_CFG, val);
10101 /* Initialize MBUF/DESC pool. */
10102 if (tg3_flag(tp, 5750_PLUS)) {
10104 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10105 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10106 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10107 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10109 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10110 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10111 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10112 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10115 fw_len = tp->fw_len;
10116 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10117 tw32(BUFMGR_MB_POOL_ADDR,
10118 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10119 tw32(BUFMGR_MB_POOL_SIZE,
10120 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10123 if (tp->dev->mtu <= ETH_DATA_LEN) {
10124 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10125 tp->bufmgr_config.mbuf_read_dma_low_water);
10126 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10127 tp->bufmgr_config.mbuf_mac_rx_low_water);
10128 tw32(BUFMGR_MB_HIGH_WATER,
10129 tp->bufmgr_config.mbuf_high_water);
10131 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10132 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10133 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10134 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10135 tw32(BUFMGR_MB_HIGH_WATER,
10136 tp->bufmgr_config.mbuf_high_water_jumbo);
10138 tw32(BUFMGR_DMA_LOW_WATER,
10139 tp->bufmgr_config.dma_low_water);
10140 tw32(BUFMGR_DMA_HIGH_WATER,
10141 tp->bufmgr_config.dma_high_water);
10143 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10144 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10145 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10146 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10147 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10148 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10149 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10150 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10151 tw32(BUFMGR_MODE, val);
10152 for (i = 0; i < 2000; i++) {
10153 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10158 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10162 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10163 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10165 tg3_setup_rxbd_thresholds(tp);
10167 /* Initialize TG3_BDINFO's at:
10168 * RCVDBDI_STD_BD: standard eth size rx ring
10169 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10170 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10173 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10174 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10175 * ring attribute flags
10176 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10178 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10179 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10181 * The size of each ring is fixed in the firmware, but the location is
10184 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10185 ((u64) tpr->rx_std_mapping >> 32));
10186 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10187 ((u64) tpr->rx_std_mapping & 0xffffffff));
10188 if (!tg3_flag(tp, 5717_PLUS))
10189 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10190 NIC_SRAM_RX_BUFFER_DESC);
10192 /* Disable the mini ring */
10193 if (!tg3_flag(tp, 5705_PLUS))
10194 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10195 BDINFO_FLAGS_DISABLED);
10197 /* Program the jumbo buffer descriptor ring control
10198 * blocks on those devices that have them.
10200 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10201 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10203 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10204 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10205 ((u64) tpr->rx_jmb_mapping >> 32));
10206 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10207 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10208 val = TG3_RX_JMB_RING_SIZE(tp) <<
10209 BDINFO_FLAGS_MAXLEN_SHIFT;
10210 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10211 val | BDINFO_FLAGS_USE_EXT_RECV);
10212 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10213 tg3_flag(tp, 57765_CLASS) ||
10214 tg3_asic_rev(tp) == ASIC_REV_5762)
10215 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10216 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10218 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10219 BDINFO_FLAGS_DISABLED);
10222 if (tg3_flag(tp, 57765_PLUS)) {
10223 val = TG3_RX_STD_RING_SIZE(tp);
10224 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10225 val |= (TG3_RX_STD_DMA_SZ << 2);
10227 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10229 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10231 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10233 tpr->rx_std_prod_idx = tp->rx_pending;
10234 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10236 tpr->rx_jmb_prod_idx =
10237 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10238 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10240 tg3_rings_reset(tp);
10242 /* Initialize MAC address and backoff seed. */
10243 __tg3_set_mac_addr(tp, false);
10245 /* MTU + ethernet header + FCS + optional VLAN tag */
10246 tw32(MAC_RX_MTU_SIZE,
10247 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10249 /* The slot time is changed by tg3_setup_phy if we
10250 * run at gigabit with half duplex.
10252 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10253 (6 << TX_LENGTHS_IPG_SHIFT) |
10254 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10256 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10257 tg3_asic_rev(tp) == ASIC_REV_5762)
10258 val |= tr32(MAC_TX_LENGTHS) &
10259 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10260 TX_LENGTHS_CNT_DWN_VAL_MSK);
10262 tw32(MAC_TX_LENGTHS, val);
10264 /* Receive rules. */
10265 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10266 tw32(RCVLPC_CONFIG, 0x0181);
10268 /* Calculate RDMAC_MODE setting early, we need it to determine
10269 * the RCVLPC_STATE_ENABLE mask.
10271 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10272 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10273 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10274 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10275 RDMAC_MODE_LNGREAD_ENAB);
10277 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10278 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10280 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10281 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10282 tg3_asic_rev(tp) == ASIC_REV_57780)
10283 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10284 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10285 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10287 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10288 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10289 if (tg3_flag(tp, TSO_CAPABLE) &&
10290 tg3_asic_rev(tp) == ASIC_REV_5705) {
10291 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10292 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10293 !tg3_flag(tp, IS_5788)) {
10294 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10298 if (tg3_flag(tp, PCI_EXPRESS))
10299 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10301 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10303 if (tp->dev->mtu <= ETH_DATA_LEN) {
10304 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10305 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10309 if (tg3_flag(tp, HW_TSO_1) ||
10310 tg3_flag(tp, HW_TSO_2) ||
10311 tg3_flag(tp, HW_TSO_3))
10312 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10314 if (tg3_flag(tp, 57765_PLUS) ||
10315 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10316 tg3_asic_rev(tp) == ASIC_REV_57780)
10317 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10319 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10320 tg3_asic_rev(tp) == ASIC_REV_5762)
10321 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10323 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10324 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10325 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10326 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10327 tg3_flag(tp, 57765_PLUS)) {
10330 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10331 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10333 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10335 val = tr32(tgtreg);
10336 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10337 tg3_asic_rev(tp) == ASIC_REV_5762) {
10338 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10339 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10340 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10341 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10342 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10343 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10345 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10348 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10349 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10350 tg3_asic_rev(tp) == ASIC_REV_5762) {
10353 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10354 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10356 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10358 val = tr32(tgtreg);
10360 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10361 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10364 /* Receive/send statistics. */
10365 if (tg3_flag(tp, 5750_PLUS)) {
10366 val = tr32(RCVLPC_STATS_ENABLE);
10367 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10368 tw32(RCVLPC_STATS_ENABLE, val);
10369 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10370 tg3_flag(tp, TSO_CAPABLE)) {
10371 val = tr32(RCVLPC_STATS_ENABLE);
10372 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10373 tw32(RCVLPC_STATS_ENABLE, val);
10375 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10377 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10378 tw32(SNDDATAI_STATSENAB, 0xffffff);
10379 tw32(SNDDATAI_STATSCTRL,
10380 (SNDDATAI_SCTRL_ENABLE |
10381 SNDDATAI_SCTRL_FASTUPD));
10383 /* Setup host coalescing engine. */
10384 tw32(HOSTCC_MODE, 0);
10385 for (i = 0; i < 2000; i++) {
10386 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10391 __tg3_set_coalesce(tp, &tp->coal);
10393 if (!tg3_flag(tp, 5705_PLUS)) {
10394 /* Status/statistics block address. See tg3_timer,
10395 * the tg3_periodic_fetch_stats call there, and
10396 * tg3_get_stats to see how this works for 5705/5750 chips.
10398 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10399 ((u64) tp->stats_mapping >> 32));
10400 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10401 ((u64) tp->stats_mapping & 0xffffffff));
10402 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10404 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10406 /* Clear statistics and status block memory areas */
10407 for (i = NIC_SRAM_STATS_BLK;
10408 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10409 i += sizeof(u32)) {
10410 tg3_write_mem(tp, i, 0);
10415 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10417 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10418 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10419 if (!tg3_flag(tp, 5705_PLUS))
10420 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10422 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10423 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10424 /* reset to prevent losing 1st rx packet intermittently */
10425 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10429 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10430 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10431 MAC_MODE_FHDE_ENABLE;
10432 if (tg3_flag(tp, ENABLE_APE))
10433 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10434 if (!tg3_flag(tp, 5705_PLUS) &&
10435 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10436 tg3_asic_rev(tp) != ASIC_REV_5700)
10437 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10438 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10441 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10442 * If TG3_FLAG_IS_NIC is zero, we should read the
10443 * register to preserve the GPIO settings for LOMs. The GPIOs,
10444 * whether used as inputs or outputs, are set by boot code after
10447 if (!tg3_flag(tp, IS_NIC)) {
10450 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10451 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10452 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10454 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10455 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10456 GRC_LCLCTRL_GPIO_OUTPUT3;
10458 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10459 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10461 tp->grc_local_ctrl &= ~gpio_mask;
10462 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10464 /* GPIO1 must be driven high for eeprom write protect */
10465 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10466 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10467 GRC_LCLCTRL_GPIO_OUTPUT1);
10469 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10472 if (tg3_flag(tp, USING_MSIX)) {
10473 val = tr32(MSGINT_MODE);
10474 val |= MSGINT_MODE_ENABLE;
10475 if (tp->irq_cnt > 1)
10476 val |= MSGINT_MODE_MULTIVEC_EN;
10477 if (!tg3_flag(tp, 1SHOT_MSI))
10478 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10479 tw32(MSGINT_MODE, val);
10482 if (!tg3_flag(tp, 5705_PLUS)) {
10483 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10487 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10488 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10489 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10490 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10491 WDMAC_MODE_LNGREAD_ENAB);
10493 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10494 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10495 if (tg3_flag(tp, TSO_CAPABLE) &&
10496 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10497 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10499 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10500 !tg3_flag(tp, IS_5788)) {
10501 val |= WDMAC_MODE_RX_ACCEL;
10505 /* Enable host coalescing bug fix */
10506 if (tg3_flag(tp, 5755_PLUS))
10507 val |= WDMAC_MODE_STATUS_TAG_FIX;
10509 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10510 val |= WDMAC_MODE_BURST_ALL_DATA;
10512 tw32_f(WDMAC_MODE, val);
10515 if (tg3_flag(tp, PCIX_MODE)) {
10518 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10520 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10521 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10522 pcix_cmd |= PCI_X_CMD_READ_2K;
10523 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10524 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10525 pcix_cmd |= PCI_X_CMD_READ_2K;
10527 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10531 tw32_f(RDMAC_MODE, rdmac_mode);
10534 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10535 tg3_asic_rev(tp) == ASIC_REV_5720) {
10536 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10537 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10540 if (i < TG3_NUM_RDMA_CHANNELS) {
10541 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10542 val |= tg3_lso_rd_dma_workaround_bit(tp);
10543 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10544 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10548 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10549 if (!tg3_flag(tp, 5705_PLUS))
10550 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10552 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10553 tw32(SNDDATAC_MODE,
10554 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10556 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10558 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10559 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10560 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10561 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10562 val |= RCVDBDI_MODE_LRG_RING_SZ;
10563 tw32(RCVDBDI_MODE, val);
10564 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10565 if (tg3_flag(tp, HW_TSO_1) ||
10566 tg3_flag(tp, HW_TSO_2) ||
10567 tg3_flag(tp, HW_TSO_3))
10568 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10569 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10570 if (tg3_flag(tp, ENABLE_TSS))
10571 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10572 tw32(SNDBDI_MODE, val);
10573 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10575 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10576 err = tg3_load_5701_a0_firmware_fix(tp);
10581 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10582 /* Ignore any errors for the firmware download. If download
10583 * fails, the device will operate with EEE disabled
10585 tg3_load_57766_firmware(tp);
10588 if (tg3_flag(tp, TSO_CAPABLE)) {
10589 err = tg3_load_tso_firmware(tp);
10594 tp->tx_mode = TX_MODE_ENABLE;
10596 if (tg3_flag(tp, 5755_PLUS) ||
10597 tg3_asic_rev(tp) == ASIC_REV_5906)
10598 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10600 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10601 tg3_asic_rev(tp) == ASIC_REV_5762) {
10602 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10603 tp->tx_mode &= ~val;
10604 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10607 tw32_f(MAC_TX_MODE, tp->tx_mode);
10610 if (tg3_flag(tp, ENABLE_RSS)) {
10613 tg3_rss_write_indir_tbl(tp);
10615 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10617 for (i = 0; i < 10 ; i++)
10618 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10621 tp->rx_mode = RX_MODE_ENABLE;
10622 if (tg3_flag(tp, 5755_PLUS))
10623 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10625 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10626 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10628 if (tg3_flag(tp, ENABLE_RSS))
10629 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10630 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10631 RX_MODE_RSS_IPV6_HASH_EN |
10632 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10633 RX_MODE_RSS_IPV4_HASH_EN |
10634 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10636 tw32_f(MAC_RX_MODE, tp->rx_mode);
10639 tw32(MAC_LED_CTRL, tp->led_ctrl);
10641 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10642 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10643 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10646 tw32_f(MAC_RX_MODE, tp->rx_mode);
10649 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10650 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10651 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10652 /* Set drive transmission level to 1.2V */
10653 /* only if the signal pre-emphasis bit is not set */
10654 val = tr32(MAC_SERDES_CFG);
10657 tw32(MAC_SERDES_CFG, val);
10659 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10660 tw32(MAC_SERDES_CFG, 0x616000);
10663 /* Prevent chip from dropping frames when flow control
10666 if (tg3_flag(tp, 57765_CLASS))
10670 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10672 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10673 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10674 /* Use hardware link auto-negotiation */
10675 tg3_flag_set(tp, HW_AUTONEG);
10678 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10679 tg3_asic_rev(tp) == ASIC_REV_5714) {
10682 tmp = tr32(SERDES_RX_CTRL);
10683 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10684 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10685 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10686 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10689 if (!tg3_flag(tp, USE_PHYLIB)) {
10690 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10691 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10693 err = tg3_setup_phy(tp, false);
10697 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10698 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10701 /* Clear CRC stats. */
10702 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10703 tg3_writephy(tp, MII_TG3_TEST1,
10704 tmp | MII_TG3_TEST1_CRC_EN);
10705 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10710 __tg3_set_rx_mode(tp->dev);
10712 /* Initialize receive rules. */
10713 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10714 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10715 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10716 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10718 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10722 if (tg3_flag(tp, ENABLE_ASF))
10726 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10729 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10732 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10735 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10738 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10741 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10744 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10747 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10750 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10753 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10756 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10759 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10762 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10764 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10772 if (tg3_flag(tp, ENABLE_APE))
10773 /* Write our heartbeat update interval to APE. */
10774 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10775 APE_HOST_HEARTBEAT_INT_5SEC);
10777 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10782 /* Called at device open time to get the chip ready for
10783 * packet processing. Invoked with tp->lock held.
10785 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10787 /* Chip may have been just powered on. If so, the boot code may still
10788 * be running initialization. Wait for it to finish to avoid races in
10789 * accessing the hardware.
10791 tg3_enable_register_access(tp);
10794 tg3_switch_clocks(tp);
10796 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10798 return tg3_reset_hw(tp, reset_phy);
10801 #ifdef CONFIG_TIGON3_HWMON
10802 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10806 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10807 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10809 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10812 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10813 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10814 memset(ocir, 0, TG3_OCIR_LEN);
10818 /* sysfs attributes for hwmon */
10819 static ssize_t tg3_show_temp(struct device *dev,
10820 struct device_attribute *devattr, char *buf)
10822 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10823 struct tg3 *tp = dev_get_drvdata(dev);
10826 spin_lock_bh(&tp->lock);
10827 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10828 sizeof(temperature));
10829 spin_unlock_bh(&tp->lock);
10830 return sprintf(buf, "%u\n", temperature * 1000);
10834 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10835 TG3_TEMP_SENSOR_OFFSET);
10836 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10837 TG3_TEMP_CAUTION_OFFSET);
10838 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10839 TG3_TEMP_MAX_OFFSET);
10841 static struct attribute *tg3_attrs[] = {
10842 &sensor_dev_attr_temp1_input.dev_attr.attr,
10843 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10844 &sensor_dev_attr_temp1_max.dev_attr.attr,
10847 ATTRIBUTE_GROUPS(tg3);
10849 static void tg3_hwmon_close(struct tg3 *tp)
10851 if (tp->hwmon_dev) {
10852 hwmon_device_unregister(tp->hwmon_dev);
10853 tp->hwmon_dev = NULL;
10857 static void tg3_hwmon_open(struct tg3 *tp)
10861 struct pci_dev *pdev = tp->pdev;
10862 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10864 tg3_sd_scan_scratchpad(tp, ocirs);
10866 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10867 if (!ocirs[i].src_data_length)
10870 size += ocirs[i].src_hdr_length;
10871 size += ocirs[i].src_data_length;
10877 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10879 if (IS_ERR(tp->hwmon_dev)) {
10880 tp->hwmon_dev = NULL;
10881 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10885 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10886 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10887 #endif /* CONFIG_TIGON3_HWMON */
10890 #define TG3_STAT_ADD32(PSTAT, REG) \
10891 do { u32 __val = tr32(REG); \
10892 (PSTAT)->low += __val; \
10893 if ((PSTAT)->low < __val) \
10894 (PSTAT)->high += 1; \
10897 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10899 struct tg3_hw_stats *sp = tp->hw_stats;
10904 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10905 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10906 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10907 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10908 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10909 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10910 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10911 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10912 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10913 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10914 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10915 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10916 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10917 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10918 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10919 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10922 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10923 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10924 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10925 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10928 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10929 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10930 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10931 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10932 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10933 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10934 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10935 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10936 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10937 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10938 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10939 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10940 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10941 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10943 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10944 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10945 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10946 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10947 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10948 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10950 u32 val = tr32(HOSTCC_FLOW_ATTN);
10951 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10953 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10954 sp->rx_discards.low += val;
10955 if (sp->rx_discards.low < val)
10956 sp->rx_discards.high += 1;
10958 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10960 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10963 static void tg3_chk_missed_msi(struct tg3 *tp)
10967 for (i = 0; i < tp->irq_cnt; i++) {
10968 struct tg3_napi *tnapi = &tp->napi[i];
10970 if (tg3_has_work(tnapi)) {
10971 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10972 tnapi->last_tx_cons == tnapi->tx_cons) {
10973 if (tnapi->chk_msi_cnt < 1) {
10974 tnapi->chk_msi_cnt++;
10980 tnapi->chk_msi_cnt = 0;
10981 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10982 tnapi->last_tx_cons = tnapi->tx_cons;
10986 static void tg3_timer(struct timer_list *t)
10988 struct tg3 *tp = from_timer(tp, t, timer);
10990 spin_lock(&tp->lock);
10992 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10993 spin_unlock(&tp->lock);
10994 goto restart_timer;
10997 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10998 tg3_flag(tp, 57765_CLASS))
10999 tg3_chk_missed_msi(tp);
11001 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11002 /* BCM4785: Flush posted writes from GbE to host memory. */
11006 if (!tg3_flag(tp, TAGGED_STATUS)) {
11007 /* All of this garbage is because when using non-tagged
11008 * IRQ status the mailbox/status_block protocol the chip
11009 * uses with the cpu is race prone.
11011 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11012 tw32(GRC_LOCAL_CTRL,
11013 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11015 tw32(HOSTCC_MODE, tp->coalesce_mode |
11016 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11019 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11020 spin_unlock(&tp->lock);
11021 tg3_reset_task_schedule(tp);
11022 goto restart_timer;
11026 /* This part only runs once per second. */
11027 if (!--tp->timer_counter) {
11028 if (tg3_flag(tp, 5705_PLUS))
11029 tg3_periodic_fetch_stats(tp);
11031 if (tp->setlpicnt && !--tp->setlpicnt)
11032 tg3_phy_eee_enable(tp);
11034 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11038 mac_stat = tr32(MAC_STATUS);
11041 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11042 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11044 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11048 tg3_setup_phy(tp, false);
11049 } else if (tg3_flag(tp, POLL_SERDES)) {
11050 u32 mac_stat = tr32(MAC_STATUS);
11051 int need_setup = 0;
11054 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11057 if (!tp->link_up &&
11058 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11059 MAC_STATUS_SIGNAL_DET))) {
11063 if (!tp->serdes_counter) {
11066 ~MAC_MODE_PORT_MODE_MASK));
11068 tw32_f(MAC_MODE, tp->mac_mode);
11071 tg3_setup_phy(tp, false);
11073 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11074 tg3_flag(tp, 5780_CLASS)) {
11075 tg3_serdes_parallel_detect(tp);
11076 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11077 u32 cpmu = tr32(TG3_CPMU_STATUS);
11078 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11079 TG3_CPMU_STATUS_LINK_MASK);
11081 if (link_up != tp->link_up)
11082 tg3_setup_phy(tp, false);
11085 tp->timer_counter = tp->timer_multiplier;
11088 /* Heartbeat is only sent once every 2 seconds.
11090 * The heartbeat is to tell the ASF firmware that the host
11091 * driver is still alive. In the event that the OS crashes,
11092 * ASF needs to reset the hardware to free up the FIFO space
11093 * that may be filled with rx packets destined for the host.
11094 * If the FIFO is full, ASF will no longer function properly.
11096 * Unintended resets have been reported on real time kernels
11097 * where the timer doesn't run on time. Netpoll will also have
11100 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11101 * to check the ring condition when the heartbeat is expiring
11102 * before doing the reset. This will prevent most unintended
11105 if (!--tp->asf_counter) {
11106 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11107 tg3_wait_for_event_ack(tp);
11109 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11110 FWCMD_NICDRV_ALIVE3);
11111 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11112 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11113 TG3_FW_UPDATE_TIMEOUT_SEC);
11115 tg3_generate_fw_event(tp);
11117 tp->asf_counter = tp->asf_multiplier;
11120 /* Update the APE heartbeat every 5 seconds.*/
11121 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11123 spin_unlock(&tp->lock);
11126 tp->timer.expires = jiffies + tp->timer_offset;
11127 add_timer(&tp->timer);
11130 static void tg3_timer_init(struct tg3 *tp)
11132 if (tg3_flag(tp, TAGGED_STATUS) &&
11133 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11134 !tg3_flag(tp, 57765_CLASS))
11135 tp->timer_offset = HZ;
11137 tp->timer_offset = HZ / 10;
11139 BUG_ON(tp->timer_offset > HZ);
11141 tp->timer_multiplier = (HZ / tp->timer_offset);
11142 tp->asf_multiplier = (HZ / tp->timer_offset) *
11143 TG3_FW_UPDATE_FREQ_SEC;
11145 timer_setup(&tp->timer, tg3_timer, 0);
11148 static void tg3_timer_start(struct tg3 *tp)
11150 tp->asf_counter = tp->asf_multiplier;
11151 tp->timer_counter = tp->timer_multiplier;
11153 tp->timer.expires = jiffies + tp->timer_offset;
11154 add_timer(&tp->timer);
11157 static void tg3_timer_stop(struct tg3 *tp)
11159 del_timer_sync(&tp->timer);
11162 /* Restart hardware after configuration changes, self-test, etc.
11163 * Invoked with tp->lock held.
11165 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11166 __releases(tp->lock)
11167 __acquires(tp->lock)
11171 err = tg3_init_hw(tp, reset_phy);
11173 netdev_err(tp->dev,
11174 "Failed to re-initialize device, aborting\n");
11175 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11176 tg3_full_unlock(tp);
11177 tg3_timer_stop(tp);
11179 tg3_napi_enable(tp);
11180 dev_close(tp->dev);
11181 tg3_full_lock(tp, 0);
11186 static void tg3_reset_task(struct work_struct *work)
11188 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11192 tg3_full_lock(tp, 0);
11194 if (!netif_running(tp->dev)) {
11195 tg3_flag_clear(tp, RESET_TASK_PENDING);
11196 tg3_full_unlock(tp);
11201 tg3_full_unlock(tp);
11205 tg3_netif_stop(tp);
11207 tg3_full_lock(tp, 1);
11209 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11210 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11211 tp->write32_rx_mbox = tg3_write_flush_reg32;
11212 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11213 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11216 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11217 err = tg3_init_hw(tp, true);
11221 tg3_netif_start(tp);
11224 tg3_full_unlock(tp);
11229 tg3_flag_clear(tp, RESET_TASK_PENDING);
11233 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11236 unsigned long flags;
11238 struct tg3_napi *tnapi = &tp->napi[irq_num];
11240 if (tp->irq_cnt == 1)
11241 name = tp->dev->name;
11243 name = &tnapi->irq_lbl[0];
11244 if (tnapi->tx_buffers && tnapi->rx_rcb)
11245 snprintf(name, IFNAMSIZ,
11246 "%s-txrx-%d", tp->dev->name, irq_num);
11247 else if (tnapi->tx_buffers)
11248 snprintf(name, IFNAMSIZ,
11249 "%s-tx-%d", tp->dev->name, irq_num);
11250 else if (tnapi->rx_rcb)
11251 snprintf(name, IFNAMSIZ,
11252 "%s-rx-%d", tp->dev->name, irq_num);
11254 snprintf(name, IFNAMSIZ,
11255 "%s-%d", tp->dev->name, irq_num);
11256 name[IFNAMSIZ-1] = 0;
11259 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11261 if (tg3_flag(tp, 1SHOT_MSI))
11262 fn = tg3_msi_1shot;
11265 fn = tg3_interrupt;
11266 if (tg3_flag(tp, TAGGED_STATUS))
11267 fn = tg3_interrupt_tagged;
11268 flags = IRQF_SHARED;
11271 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11274 static int tg3_test_interrupt(struct tg3 *tp)
11276 struct tg3_napi *tnapi = &tp->napi[0];
11277 struct net_device *dev = tp->dev;
11278 int err, i, intr_ok = 0;
11281 if (!netif_running(dev))
11284 tg3_disable_ints(tp);
11286 free_irq(tnapi->irq_vec, tnapi);
11289 * Turn off MSI one shot mode. Otherwise this test has no
11290 * observable way to know whether the interrupt was delivered.
11292 if (tg3_flag(tp, 57765_PLUS)) {
11293 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11294 tw32(MSGINT_MODE, val);
11297 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11298 IRQF_SHARED, dev->name, tnapi);
11302 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11303 tg3_enable_ints(tp);
11305 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11308 for (i = 0; i < 5; i++) {
11309 u32 int_mbox, misc_host_ctrl;
11311 int_mbox = tr32_mailbox(tnapi->int_mbox);
11312 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11314 if ((int_mbox != 0) ||
11315 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11320 if (tg3_flag(tp, 57765_PLUS) &&
11321 tnapi->hw_status->status_tag != tnapi->last_tag)
11322 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11327 tg3_disable_ints(tp);
11329 free_irq(tnapi->irq_vec, tnapi);
11331 err = tg3_request_irq(tp, 0);
11337 /* Reenable MSI one shot mode. */
11338 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11339 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11340 tw32(MSGINT_MODE, val);
11348 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11349 * successfully restored
11351 static int tg3_test_msi(struct tg3 *tp)
11356 if (!tg3_flag(tp, USING_MSI))
11359 /* Turn off SERR reporting in case MSI terminates with Master
11362 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11363 pci_write_config_word(tp->pdev, PCI_COMMAND,
11364 pci_cmd & ~PCI_COMMAND_SERR);
11366 err = tg3_test_interrupt(tp);
11368 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11373 /* other failures */
11377 /* MSI test failed, go back to INTx mode */
11378 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11379 "to INTx mode. Please report this failure to the PCI "
11380 "maintainer and include system chipset information\n");
11382 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11384 pci_disable_msi(tp->pdev);
11386 tg3_flag_clear(tp, USING_MSI);
11387 tp->napi[0].irq_vec = tp->pdev->irq;
11389 err = tg3_request_irq(tp, 0);
11393 /* Need to reset the chip because the MSI cycle may have terminated
11394 * with Master Abort.
11396 tg3_full_lock(tp, 1);
11398 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11399 err = tg3_init_hw(tp, true);
11401 tg3_full_unlock(tp);
11404 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11409 static int tg3_request_firmware(struct tg3 *tp)
11411 const struct tg3_firmware_hdr *fw_hdr;
11413 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11414 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11419 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11421 /* Firmware blob starts with version numbers, followed by
11422 * start address and _full_ length including BSS sections
11423 * (which must be longer than the actual data, of course
11426 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11427 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11428 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11429 tp->fw_len, tp->fw_needed);
11430 release_firmware(tp->fw);
11435 /* We no longer need firmware; we have it. */
11436 tp->fw_needed = NULL;
11440 static u32 tg3_irq_count(struct tg3 *tp)
11442 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11445 /* We want as many rx rings enabled as there are cpus.
11446 * In multiqueue MSI-X mode, the first MSI-X vector
11447 * only deals with link interrupts, etc, so we add
11448 * one to the number of vectors we are requesting.
11450 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11456 static bool tg3_enable_msix(struct tg3 *tp)
11459 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11461 tp->txq_cnt = tp->txq_req;
11462 tp->rxq_cnt = tp->rxq_req;
11464 tp->rxq_cnt = netif_get_num_default_rss_queues();
11465 if (tp->rxq_cnt > tp->rxq_max)
11466 tp->rxq_cnt = tp->rxq_max;
11468 /* Disable multiple TX rings by default. Simple round-robin hardware
11469 * scheduling of the TX rings can cause starvation of rings with
11470 * small packets when other rings have TSO or jumbo packets.
11475 tp->irq_cnt = tg3_irq_count(tp);
11477 for (i = 0; i < tp->irq_max; i++) {
11478 msix_ent[i].entry = i;
11479 msix_ent[i].vector = 0;
11482 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11485 } else if (rc < tp->irq_cnt) {
11486 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11489 tp->rxq_cnt = max(rc - 1, 1);
11491 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11494 for (i = 0; i < tp->irq_max; i++)
11495 tp->napi[i].irq_vec = msix_ent[i].vector;
11497 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11498 pci_disable_msix(tp->pdev);
11502 if (tp->irq_cnt == 1)
11505 tg3_flag_set(tp, ENABLE_RSS);
11507 if (tp->txq_cnt > 1)
11508 tg3_flag_set(tp, ENABLE_TSS);
11510 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11515 static void tg3_ints_init(struct tg3 *tp)
11517 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11518 !tg3_flag(tp, TAGGED_STATUS)) {
11519 /* All MSI supporting chips should support tagged
11520 * status. Assert that this is the case.
11522 netdev_warn(tp->dev,
11523 "MSI without TAGGED_STATUS? Not using MSI\n");
11527 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11528 tg3_flag_set(tp, USING_MSIX);
11529 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11530 tg3_flag_set(tp, USING_MSI);
11532 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11533 u32 msi_mode = tr32(MSGINT_MODE);
11534 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11535 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11536 if (!tg3_flag(tp, 1SHOT_MSI))
11537 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11538 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11541 if (!tg3_flag(tp, USING_MSIX)) {
11543 tp->napi[0].irq_vec = tp->pdev->irq;
11546 if (tp->irq_cnt == 1) {
11549 netif_set_real_num_tx_queues(tp->dev, 1);
11550 netif_set_real_num_rx_queues(tp->dev, 1);
11554 static void tg3_ints_fini(struct tg3 *tp)
11556 if (tg3_flag(tp, USING_MSIX))
11557 pci_disable_msix(tp->pdev);
11558 else if (tg3_flag(tp, USING_MSI))
11559 pci_disable_msi(tp->pdev);
11560 tg3_flag_clear(tp, USING_MSI);
11561 tg3_flag_clear(tp, USING_MSIX);
11562 tg3_flag_clear(tp, ENABLE_RSS);
11563 tg3_flag_clear(tp, ENABLE_TSS);
11566 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11569 struct net_device *dev = tp->dev;
11573 * Setup interrupts first so we know how
11574 * many NAPI resources to allocate
11578 tg3_rss_check_indir_tbl(tp);
11580 /* The placement of this call is tied
11581 * to the setup and use of Host TX descriptors.
11583 err = tg3_alloc_consistent(tp);
11585 goto out_ints_fini;
11589 tg3_napi_enable(tp);
11591 for (i = 0; i < tp->irq_cnt; i++) {
11592 err = tg3_request_irq(tp, i);
11594 for (i--; i >= 0; i--) {
11595 struct tg3_napi *tnapi = &tp->napi[i];
11597 free_irq(tnapi->irq_vec, tnapi);
11599 goto out_napi_fini;
11603 tg3_full_lock(tp, 0);
11606 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11608 err = tg3_init_hw(tp, reset_phy);
11610 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11611 tg3_free_rings(tp);
11614 tg3_full_unlock(tp);
11619 if (test_irq && tg3_flag(tp, USING_MSI)) {
11620 err = tg3_test_msi(tp);
11623 tg3_full_lock(tp, 0);
11624 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11625 tg3_free_rings(tp);
11626 tg3_full_unlock(tp);
11628 goto out_napi_fini;
11631 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11632 u32 val = tr32(PCIE_TRANSACTION_CFG);
11634 tw32(PCIE_TRANSACTION_CFG,
11635 val | PCIE_TRANS_CFG_1SHOT_MSI);
11641 tg3_hwmon_open(tp);
11643 tg3_full_lock(tp, 0);
11645 tg3_timer_start(tp);
11646 tg3_flag_set(tp, INIT_COMPLETE);
11647 tg3_enable_ints(tp);
11649 tg3_ptp_resume(tp);
11651 tg3_full_unlock(tp);
11653 netif_tx_start_all_queues(dev);
11656 * Reset loopback feature if it was turned on while the device was down
11657 * make sure that it's installed properly now.
11659 if (dev->features & NETIF_F_LOOPBACK)
11660 tg3_set_loopback(dev, dev->features);
11665 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11666 struct tg3_napi *tnapi = &tp->napi[i];
11667 free_irq(tnapi->irq_vec, tnapi);
11671 tg3_napi_disable(tp);
11673 tg3_free_consistent(tp);
11681 static void tg3_stop(struct tg3 *tp)
11685 tg3_reset_task_cancel(tp);
11686 tg3_netif_stop(tp);
11688 tg3_timer_stop(tp);
11690 tg3_hwmon_close(tp);
11694 tg3_full_lock(tp, 1);
11696 tg3_disable_ints(tp);
11698 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11699 tg3_free_rings(tp);
11700 tg3_flag_clear(tp, INIT_COMPLETE);
11702 tg3_full_unlock(tp);
11704 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11705 struct tg3_napi *tnapi = &tp->napi[i];
11706 free_irq(tnapi->irq_vec, tnapi);
11713 tg3_free_consistent(tp);
11716 static int tg3_open(struct net_device *dev)
11718 struct tg3 *tp = netdev_priv(dev);
11721 if (tp->pcierr_recovery) {
11722 netdev_err(dev, "Failed to open device. PCI error recovery "
11727 if (tp->fw_needed) {
11728 err = tg3_request_firmware(tp);
11729 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11731 netdev_warn(tp->dev, "EEE capability disabled\n");
11732 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11733 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11734 netdev_warn(tp->dev, "EEE capability restored\n");
11735 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11737 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11741 netdev_warn(tp->dev, "TSO capability disabled\n");
11742 tg3_flag_clear(tp, TSO_CAPABLE);
11743 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11744 netdev_notice(tp->dev, "TSO capability restored\n");
11745 tg3_flag_set(tp, TSO_CAPABLE);
11749 tg3_carrier_off(tp);
11751 err = tg3_power_up(tp);
11755 tg3_full_lock(tp, 0);
11757 tg3_disable_ints(tp);
11758 tg3_flag_clear(tp, INIT_COMPLETE);
11760 tg3_full_unlock(tp);
11762 err = tg3_start(tp,
11763 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11766 tg3_frob_aux_power(tp, false);
11767 pci_set_power_state(tp->pdev, PCI_D3hot);
11773 static int tg3_close(struct net_device *dev)
11775 struct tg3 *tp = netdev_priv(dev);
11777 if (tp->pcierr_recovery) {
11778 netdev_err(dev, "Failed to close device. PCI error recovery "
11785 if (pci_device_is_present(tp->pdev)) {
11786 tg3_power_down_prepare(tp);
11788 tg3_carrier_off(tp);
11793 static inline u64 get_stat64(tg3_stat64_t *val)
11795 return ((u64)val->high << 32) | ((u64)val->low);
11798 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11800 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11802 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11803 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11804 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11807 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11808 tg3_writephy(tp, MII_TG3_TEST1,
11809 val | MII_TG3_TEST1_CRC_EN);
11810 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11814 tp->phy_crc_errors += val;
11816 return tp->phy_crc_errors;
11819 return get_stat64(&hw_stats->rx_fcs_errors);
11822 #define ESTAT_ADD(member) \
11823 estats->member = old_estats->member + \
11824 get_stat64(&hw_stats->member)
11826 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11828 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11829 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11831 ESTAT_ADD(rx_octets);
11832 ESTAT_ADD(rx_fragments);
11833 ESTAT_ADD(rx_ucast_packets);
11834 ESTAT_ADD(rx_mcast_packets);
11835 ESTAT_ADD(rx_bcast_packets);
11836 ESTAT_ADD(rx_fcs_errors);
11837 ESTAT_ADD(rx_align_errors);
11838 ESTAT_ADD(rx_xon_pause_rcvd);
11839 ESTAT_ADD(rx_xoff_pause_rcvd);
11840 ESTAT_ADD(rx_mac_ctrl_rcvd);
11841 ESTAT_ADD(rx_xoff_entered);
11842 ESTAT_ADD(rx_frame_too_long_errors);
11843 ESTAT_ADD(rx_jabbers);
11844 ESTAT_ADD(rx_undersize_packets);
11845 ESTAT_ADD(rx_in_length_errors);
11846 ESTAT_ADD(rx_out_length_errors);
11847 ESTAT_ADD(rx_64_or_less_octet_packets);
11848 ESTAT_ADD(rx_65_to_127_octet_packets);
11849 ESTAT_ADD(rx_128_to_255_octet_packets);
11850 ESTAT_ADD(rx_256_to_511_octet_packets);
11851 ESTAT_ADD(rx_512_to_1023_octet_packets);
11852 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11853 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11854 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11855 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11856 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11858 ESTAT_ADD(tx_octets);
11859 ESTAT_ADD(tx_collisions);
11860 ESTAT_ADD(tx_xon_sent);
11861 ESTAT_ADD(tx_xoff_sent);
11862 ESTAT_ADD(tx_flow_control);
11863 ESTAT_ADD(tx_mac_errors);
11864 ESTAT_ADD(tx_single_collisions);
11865 ESTAT_ADD(tx_mult_collisions);
11866 ESTAT_ADD(tx_deferred);
11867 ESTAT_ADD(tx_excessive_collisions);
11868 ESTAT_ADD(tx_late_collisions);
11869 ESTAT_ADD(tx_collide_2times);
11870 ESTAT_ADD(tx_collide_3times);
11871 ESTAT_ADD(tx_collide_4times);
11872 ESTAT_ADD(tx_collide_5times);
11873 ESTAT_ADD(tx_collide_6times);
11874 ESTAT_ADD(tx_collide_7times);
11875 ESTAT_ADD(tx_collide_8times);
11876 ESTAT_ADD(tx_collide_9times);
11877 ESTAT_ADD(tx_collide_10times);
11878 ESTAT_ADD(tx_collide_11times);
11879 ESTAT_ADD(tx_collide_12times);
11880 ESTAT_ADD(tx_collide_13times);
11881 ESTAT_ADD(tx_collide_14times);
11882 ESTAT_ADD(tx_collide_15times);
11883 ESTAT_ADD(tx_ucast_packets);
11884 ESTAT_ADD(tx_mcast_packets);
11885 ESTAT_ADD(tx_bcast_packets);
11886 ESTAT_ADD(tx_carrier_sense_errors);
11887 ESTAT_ADD(tx_discards);
11888 ESTAT_ADD(tx_errors);
11890 ESTAT_ADD(dma_writeq_full);
11891 ESTAT_ADD(dma_write_prioq_full);
11892 ESTAT_ADD(rxbds_empty);
11893 ESTAT_ADD(rx_discards);
11894 ESTAT_ADD(rx_errors);
11895 ESTAT_ADD(rx_threshold_hit);
11897 ESTAT_ADD(dma_readq_full);
11898 ESTAT_ADD(dma_read_prioq_full);
11899 ESTAT_ADD(tx_comp_queue_full);
11901 ESTAT_ADD(ring_set_send_prod_index);
11902 ESTAT_ADD(ring_status_update);
11903 ESTAT_ADD(nic_irqs);
11904 ESTAT_ADD(nic_avoided_irqs);
11905 ESTAT_ADD(nic_tx_threshold_hit);
11907 ESTAT_ADD(mbuf_lwm_thresh_hit);
11910 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11912 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11913 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11915 stats->rx_packets = old_stats->rx_packets +
11916 get_stat64(&hw_stats->rx_ucast_packets) +
11917 get_stat64(&hw_stats->rx_mcast_packets) +
11918 get_stat64(&hw_stats->rx_bcast_packets);
11920 stats->tx_packets = old_stats->tx_packets +
11921 get_stat64(&hw_stats->tx_ucast_packets) +
11922 get_stat64(&hw_stats->tx_mcast_packets) +
11923 get_stat64(&hw_stats->tx_bcast_packets);
11925 stats->rx_bytes = old_stats->rx_bytes +
11926 get_stat64(&hw_stats->rx_octets);
11927 stats->tx_bytes = old_stats->tx_bytes +
11928 get_stat64(&hw_stats->tx_octets);
11930 stats->rx_errors = old_stats->rx_errors +
11931 get_stat64(&hw_stats->rx_errors);
11932 stats->tx_errors = old_stats->tx_errors +
11933 get_stat64(&hw_stats->tx_errors) +
11934 get_stat64(&hw_stats->tx_mac_errors) +
11935 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11936 get_stat64(&hw_stats->tx_discards);
11938 stats->multicast = old_stats->multicast +
11939 get_stat64(&hw_stats->rx_mcast_packets);
11940 stats->collisions = old_stats->collisions +
11941 get_stat64(&hw_stats->tx_collisions);
11943 stats->rx_length_errors = old_stats->rx_length_errors +
11944 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11945 get_stat64(&hw_stats->rx_undersize_packets);
11947 stats->rx_frame_errors = old_stats->rx_frame_errors +
11948 get_stat64(&hw_stats->rx_align_errors);
11949 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11950 get_stat64(&hw_stats->tx_discards);
11951 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11952 get_stat64(&hw_stats->tx_carrier_sense_errors);
11954 stats->rx_crc_errors = old_stats->rx_crc_errors +
11955 tg3_calc_crc_errors(tp);
11957 stats->rx_missed_errors = old_stats->rx_missed_errors +
11958 get_stat64(&hw_stats->rx_discards);
11960 stats->rx_dropped = tp->rx_dropped;
11961 stats->tx_dropped = tp->tx_dropped;
11964 static int tg3_get_regs_len(struct net_device *dev)
11966 return TG3_REG_BLK_SIZE;
11969 static void tg3_get_regs(struct net_device *dev,
11970 struct ethtool_regs *regs, void *_p)
11972 struct tg3 *tp = netdev_priv(dev);
11976 memset(_p, 0, TG3_REG_BLK_SIZE);
11978 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11981 tg3_full_lock(tp, 0);
11983 tg3_dump_legacy_regs(tp, (u32 *)_p);
11985 tg3_full_unlock(tp);
11988 static int tg3_get_eeprom_len(struct net_device *dev)
11990 struct tg3 *tp = netdev_priv(dev);
11992 return tp->nvram_size;
11995 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11997 struct tg3 *tp = netdev_priv(dev);
11998 int ret, cpmu_restore = 0;
12000 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12003 if (tg3_flag(tp, NO_NVRAM))
12006 offset = eeprom->offset;
12010 eeprom->magic = TG3_EEPROM_MAGIC;
12012 /* Override clock, link aware and link idle modes */
12013 if (tg3_flag(tp, CPMU_PRESENT)) {
12014 cpmu_val = tr32(TG3_CPMU_CTRL);
12015 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12016 CPMU_CTRL_LINK_IDLE_MODE)) {
12017 tw32(TG3_CPMU_CTRL, cpmu_val &
12018 ~(CPMU_CTRL_LINK_AWARE_MODE |
12019 CPMU_CTRL_LINK_IDLE_MODE));
12023 tg3_override_clk(tp);
12026 /* adjustments to start on required 4 byte boundary */
12027 b_offset = offset & 3;
12028 b_count = 4 - b_offset;
12029 if (b_count > len) {
12030 /* i.e. offset=1 len=2 */
12033 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12036 memcpy(data, ((char *)&val) + b_offset, b_count);
12039 eeprom->len += b_count;
12042 /* read bytes up to the last 4 byte boundary */
12043 pd = &data[eeprom->len];
12044 for (i = 0; i < (len - (len & 3)); i += 4) {
12045 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12052 memcpy(pd + i, &val, 4);
12053 if (need_resched()) {
12054 if (signal_pending(current)) {
12065 /* read last bytes not ending on 4 byte boundary */
12066 pd = &data[eeprom->len];
12068 b_offset = offset + len - b_count;
12069 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12072 memcpy(pd, &val, b_count);
12073 eeprom->len += b_count;
12078 /* Restore clock, link aware and link idle modes */
12079 tg3_restore_clk(tp);
12081 tw32(TG3_CPMU_CTRL, cpmu_val);
12086 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12088 struct tg3 *tp = netdev_priv(dev);
12090 u32 offset, len, b_offset, odd_len;
12092 __be32 start = 0, end;
12094 if (tg3_flag(tp, NO_NVRAM) ||
12095 eeprom->magic != TG3_EEPROM_MAGIC)
12098 offset = eeprom->offset;
12101 if ((b_offset = (offset & 3))) {
12102 /* adjustments to start on required 4 byte boundary */
12103 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12114 /* adjustments to end on required 4 byte boundary */
12116 len = (len + 3) & ~3;
12117 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12123 if (b_offset || odd_len) {
12124 buf = kmalloc(len, GFP_KERNEL);
12128 memcpy(buf, &start, 4);
12130 memcpy(buf+len-4, &end, 4);
12131 memcpy(buf + b_offset, data, eeprom->len);
12134 ret = tg3_nvram_write_block(tp, offset, len, buf);
12142 static int tg3_get_link_ksettings(struct net_device *dev,
12143 struct ethtool_link_ksettings *cmd)
12145 struct tg3 *tp = netdev_priv(dev);
12146 u32 supported, advertising;
12148 if (tg3_flag(tp, USE_PHYLIB)) {
12149 struct phy_device *phydev;
12150 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12152 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12153 phy_ethtool_ksettings_get(phydev, cmd);
12158 supported = (SUPPORTED_Autoneg);
12160 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12161 supported |= (SUPPORTED_1000baseT_Half |
12162 SUPPORTED_1000baseT_Full);
12164 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12165 supported |= (SUPPORTED_100baseT_Half |
12166 SUPPORTED_100baseT_Full |
12167 SUPPORTED_10baseT_Half |
12168 SUPPORTED_10baseT_Full |
12170 cmd->base.port = PORT_TP;
12172 supported |= SUPPORTED_FIBRE;
12173 cmd->base.port = PORT_FIBRE;
12175 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12178 advertising = tp->link_config.advertising;
12179 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12180 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12181 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12182 advertising |= ADVERTISED_Pause;
12184 advertising |= ADVERTISED_Pause |
12185 ADVERTISED_Asym_Pause;
12187 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12188 advertising |= ADVERTISED_Asym_Pause;
12191 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12194 if (netif_running(dev) && tp->link_up) {
12195 cmd->base.speed = tp->link_config.active_speed;
12196 cmd->base.duplex = tp->link_config.active_duplex;
12197 ethtool_convert_legacy_u32_to_link_mode(
12198 cmd->link_modes.lp_advertising,
12199 tp->link_config.rmt_adv);
12201 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12202 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12203 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12205 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12208 cmd->base.speed = SPEED_UNKNOWN;
12209 cmd->base.duplex = DUPLEX_UNKNOWN;
12210 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12212 cmd->base.phy_address = tp->phy_addr;
12213 cmd->base.autoneg = tp->link_config.autoneg;
12217 static int tg3_set_link_ksettings(struct net_device *dev,
12218 const struct ethtool_link_ksettings *cmd)
12220 struct tg3 *tp = netdev_priv(dev);
12221 u32 speed = cmd->base.speed;
12224 if (tg3_flag(tp, USE_PHYLIB)) {
12225 struct phy_device *phydev;
12226 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12228 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12229 return phy_ethtool_ksettings_set(phydev, cmd);
12232 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12233 cmd->base.autoneg != AUTONEG_DISABLE)
12236 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12237 cmd->base.duplex != DUPLEX_FULL &&
12238 cmd->base.duplex != DUPLEX_HALF)
12241 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12242 cmd->link_modes.advertising);
12244 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12245 u32 mask = ADVERTISED_Autoneg |
12247 ADVERTISED_Asym_Pause;
12249 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12250 mask |= ADVERTISED_1000baseT_Half |
12251 ADVERTISED_1000baseT_Full;
12253 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12254 mask |= ADVERTISED_100baseT_Half |
12255 ADVERTISED_100baseT_Full |
12256 ADVERTISED_10baseT_Half |
12257 ADVERTISED_10baseT_Full |
12260 mask |= ADVERTISED_FIBRE;
12262 if (advertising & ~mask)
12265 mask &= (ADVERTISED_1000baseT_Half |
12266 ADVERTISED_1000baseT_Full |
12267 ADVERTISED_100baseT_Half |
12268 ADVERTISED_100baseT_Full |
12269 ADVERTISED_10baseT_Half |
12270 ADVERTISED_10baseT_Full);
12272 advertising &= mask;
12274 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12275 if (speed != SPEED_1000)
12278 if (cmd->base.duplex != DUPLEX_FULL)
12281 if (speed != SPEED_100 &&
12287 tg3_full_lock(tp, 0);
12289 tp->link_config.autoneg = cmd->base.autoneg;
12290 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12291 tp->link_config.advertising = (advertising |
12292 ADVERTISED_Autoneg);
12293 tp->link_config.speed = SPEED_UNKNOWN;
12294 tp->link_config.duplex = DUPLEX_UNKNOWN;
12296 tp->link_config.advertising = 0;
12297 tp->link_config.speed = speed;
12298 tp->link_config.duplex = cmd->base.duplex;
12301 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12303 tg3_warn_mgmt_link_flap(tp);
12305 if (netif_running(dev))
12306 tg3_setup_phy(tp, true);
12308 tg3_full_unlock(tp);
12313 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12315 struct tg3 *tp = netdev_priv(dev);
12317 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12318 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12319 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12320 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12323 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12325 struct tg3 *tp = netdev_priv(dev);
12327 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12328 wol->supported = WAKE_MAGIC;
12330 wol->supported = 0;
12332 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12333 wol->wolopts = WAKE_MAGIC;
12334 memset(&wol->sopass, 0, sizeof(wol->sopass));
12337 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12339 struct tg3 *tp = netdev_priv(dev);
12340 struct device *dp = &tp->pdev->dev;
12342 if (wol->wolopts & ~WAKE_MAGIC)
12344 if ((wol->wolopts & WAKE_MAGIC) &&
12345 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12348 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12350 if (device_may_wakeup(dp))
12351 tg3_flag_set(tp, WOL_ENABLE);
12353 tg3_flag_clear(tp, WOL_ENABLE);
12358 static u32 tg3_get_msglevel(struct net_device *dev)
12360 struct tg3 *tp = netdev_priv(dev);
12361 return tp->msg_enable;
12364 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12366 struct tg3 *tp = netdev_priv(dev);
12367 tp->msg_enable = value;
12370 static int tg3_nway_reset(struct net_device *dev)
12372 struct tg3 *tp = netdev_priv(dev);
12375 if (!netif_running(dev))
12378 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12381 tg3_warn_mgmt_link_flap(tp);
12383 if (tg3_flag(tp, USE_PHYLIB)) {
12384 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12386 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12390 spin_lock_bh(&tp->lock);
12392 tg3_readphy(tp, MII_BMCR, &bmcr);
12393 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12394 ((bmcr & BMCR_ANENABLE) ||
12395 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12396 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12400 spin_unlock_bh(&tp->lock);
12406 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12408 struct tg3 *tp = netdev_priv(dev);
12410 ering->rx_max_pending = tp->rx_std_ring_mask;
12411 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12412 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12414 ering->rx_jumbo_max_pending = 0;
12416 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12418 ering->rx_pending = tp->rx_pending;
12419 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12420 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12422 ering->rx_jumbo_pending = 0;
12424 ering->tx_pending = tp->napi[0].tx_pending;
12427 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12429 struct tg3 *tp = netdev_priv(dev);
12430 int i, irq_sync = 0, err = 0;
12431 bool reset_phy = false;
12433 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12434 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12435 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12436 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12437 (tg3_flag(tp, TSO_BUG) &&
12438 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12441 if (netif_running(dev)) {
12443 tg3_netif_stop(tp);
12447 tg3_full_lock(tp, irq_sync);
12449 tp->rx_pending = ering->rx_pending;
12451 if (tg3_flag(tp, MAX_RXPEND_64) &&
12452 tp->rx_pending > 63)
12453 tp->rx_pending = 63;
12455 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12456 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12458 for (i = 0; i < tp->irq_max; i++)
12459 tp->napi[i].tx_pending = ering->tx_pending;
12461 if (netif_running(dev)) {
12462 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12463 /* Reset PHY to avoid PHY lock up */
12464 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12465 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12466 tg3_asic_rev(tp) == ASIC_REV_5720)
12469 err = tg3_restart_hw(tp, reset_phy);
12471 tg3_netif_start(tp);
12474 tg3_full_unlock(tp);
12476 if (irq_sync && !err)
12482 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12484 struct tg3 *tp = netdev_priv(dev);
12486 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12488 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12489 epause->rx_pause = 1;
12491 epause->rx_pause = 0;
12493 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12494 epause->tx_pause = 1;
12496 epause->tx_pause = 0;
12499 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12501 struct tg3 *tp = netdev_priv(dev);
12503 bool reset_phy = false;
12505 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12506 tg3_warn_mgmt_link_flap(tp);
12508 if (tg3_flag(tp, USE_PHYLIB)) {
12509 struct phy_device *phydev;
12511 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12513 if (!phy_validate_pause(phydev, epause))
12516 tp->link_config.flowctrl = 0;
12517 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12518 if (epause->rx_pause) {
12519 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12521 if (epause->tx_pause) {
12522 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12524 } else if (epause->tx_pause) {
12525 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12528 if (epause->autoneg)
12529 tg3_flag_set(tp, PAUSE_AUTONEG);
12531 tg3_flag_clear(tp, PAUSE_AUTONEG);
12533 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12534 if (phydev->autoneg) {
12535 /* phy_set_asym_pause() will
12536 * renegotiate the link to inform our
12537 * link partner of our flow control
12538 * settings, even if the flow control
12539 * is forced. Let tg3_adjust_link()
12540 * do the final flow control setup.
12545 if (!epause->autoneg)
12546 tg3_setup_flow_control(tp, 0, 0);
12551 if (netif_running(dev)) {
12552 tg3_netif_stop(tp);
12556 tg3_full_lock(tp, irq_sync);
12558 if (epause->autoneg)
12559 tg3_flag_set(tp, PAUSE_AUTONEG);
12561 tg3_flag_clear(tp, PAUSE_AUTONEG);
12562 if (epause->rx_pause)
12563 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12565 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12566 if (epause->tx_pause)
12567 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12569 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12571 if (netif_running(dev)) {
12572 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12573 /* Reset PHY to avoid PHY lock up */
12574 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12575 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12576 tg3_asic_rev(tp) == ASIC_REV_5720)
12579 err = tg3_restart_hw(tp, reset_phy);
12581 tg3_netif_start(tp);
12584 tg3_full_unlock(tp);
12587 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12592 static int tg3_get_sset_count(struct net_device *dev, int sset)
12596 return TG3_NUM_TEST;
12598 return TG3_NUM_STATS;
12600 return -EOPNOTSUPP;
12604 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12605 u32 *rules __always_unused)
12607 struct tg3 *tp = netdev_priv(dev);
12609 if (!tg3_flag(tp, SUPPORT_MSIX))
12610 return -EOPNOTSUPP;
12612 switch (info->cmd) {
12613 case ETHTOOL_GRXRINGS:
12614 if (netif_running(tp->dev))
12615 info->data = tp->rxq_cnt;
12617 info->data = num_online_cpus();
12618 if (info->data > TG3_RSS_MAX_NUM_QS)
12619 info->data = TG3_RSS_MAX_NUM_QS;
12625 return -EOPNOTSUPP;
12629 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12632 struct tg3 *tp = netdev_priv(dev);
12634 if (tg3_flag(tp, SUPPORT_MSIX))
12635 size = TG3_RSS_INDIR_TBL_SIZE;
12640 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12642 struct tg3 *tp = netdev_priv(dev);
12646 *hfunc = ETH_RSS_HASH_TOP;
12650 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12651 indir[i] = tp->rss_ind_tbl[i];
12656 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12659 struct tg3 *tp = netdev_priv(dev);
12662 /* We require at least one supported parameter to be changed and no
12663 * change in any of the unsupported parameters
12666 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12667 return -EOPNOTSUPP;
12672 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12673 tp->rss_ind_tbl[i] = indir[i];
12675 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12678 /* It is legal to write the indirection
12679 * table while the device is running.
12681 tg3_full_lock(tp, 0);
12682 tg3_rss_write_indir_tbl(tp);
12683 tg3_full_unlock(tp);
12688 static void tg3_get_channels(struct net_device *dev,
12689 struct ethtool_channels *channel)
12691 struct tg3 *tp = netdev_priv(dev);
12692 u32 deflt_qs = netif_get_num_default_rss_queues();
12694 channel->max_rx = tp->rxq_max;
12695 channel->max_tx = tp->txq_max;
12697 if (netif_running(dev)) {
12698 channel->rx_count = tp->rxq_cnt;
12699 channel->tx_count = tp->txq_cnt;
12702 channel->rx_count = tp->rxq_req;
12704 channel->rx_count = min(deflt_qs, tp->rxq_max);
12707 channel->tx_count = tp->txq_req;
12709 channel->tx_count = min(deflt_qs, tp->txq_max);
12713 static int tg3_set_channels(struct net_device *dev,
12714 struct ethtool_channels *channel)
12716 struct tg3 *tp = netdev_priv(dev);
12718 if (!tg3_flag(tp, SUPPORT_MSIX))
12719 return -EOPNOTSUPP;
12721 if (channel->rx_count > tp->rxq_max ||
12722 channel->tx_count > tp->txq_max)
12725 tp->rxq_req = channel->rx_count;
12726 tp->txq_req = channel->tx_count;
12728 if (!netif_running(dev))
12733 tg3_carrier_off(tp);
12735 tg3_start(tp, true, false, false);
12740 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12742 switch (stringset) {
12744 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
12747 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12750 WARN_ON(1); /* we need a WARN() */
12755 static int tg3_set_phys_id(struct net_device *dev,
12756 enum ethtool_phys_id_state state)
12758 struct tg3 *tp = netdev_priv(dev);
12760 if (!netif_running(tp->dev))
12764 case ETHTOOL_ID_ACTIVE:
12765 return 1; /* cycle on/off once per second */
12767 case ETHTOOL_ID_ON:
12768 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12769 LED_CTRL_1000MBPS_ON |
12770 LED_CTRL_100MBPS_ON |
12771 LED_CTRL_10MBPS_ON |
12772 LED_CTRL_TRAFFIC_OVERRIDE |
12773 LED_CTRL_TRAFFIC_BLINK |
12774 LED_CTRL_TRAFFIC_LED);
12777 case ETHTOOL_ID_OFF:
12778 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12779 LED_CTRL_TRAFFIC_OVERRIDE);
12782 case ETHTOOL_ID_INACTIVE:
12783 tw32(MAC_LED_CTRL, tp->led_ctrl);
12790 static void tg3_get_ethtool_stats(struct net_device *dev,
12791 struct ethtool_stats *estats, u64 *tmp_stats)
12793 struct tg3 *tp = netdev_priv(dev);
12796 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12798 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12801 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12805 u32 offset = 0, len = 0;
12808 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12811 if (magic == TG3_EEPROM_MAGIC) {
12812 for (offset = TG3_NVM_DIR_START;
12813 offset < TG3_NVM_DIR_END;
12814 offset += TG3_NVM_DIRENT_SIZE) {
12815 if (tg3_nvram_read(tp, offset, &val))
12818 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12819 TG3_NVM_DIRTYPE_EXTVPD)
12823 if (offset != TG3_NVM_DIR_END) {
12824 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12825 if (tg3_nvram_read(tp, offset + 4, &offset))
12828 offset = tg3_nvram_logical_addr(tp, offset);
12832 if (!offset || !len) {
12833 offset = TG3_NVM_VPD_OFF;
12834 len = TG3_NVM_VPD_LEN;
12837 buf = kmalloc(len, GFP_KERNEL);
12841 if (magic == TG3_EEPROM_MAGIC) {
12842 for (i = 0; i < len; i += 4) {
12843 /* The data is in little-endian format in NVRAM.
12844 * Use the big-endian read routines to preserve
12845 * the byte order as it exists in NVRAM.
12847 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12853 unsigned int pos = 0;
12855 ptr = (u8 *)&buf[0];
12856 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12857 cnt = pci_read_vpd(tp->pdev, pos,
12859 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12877 #define NVRAM_TEST_SIZE 0x100
12878 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12879 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12880 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12881 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12882 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12883 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12884 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12885 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12887 static int tg3_test_nvram(struct tg3 *tp)
12889 u32 csum, magic, len;
12891 int i, j, k, err = 0, size;
12893 if (tg3_flag(tp, NO_NVRAM))
12896 if (tg3_nvram_read(tp, 0, &magic) != 0)
12899 if (magic == TG3_EEPROM_MAGIC)
12900 size = NVRAM_TEST_SIZE;
12901 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12902 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12903 TG3_EEPROM_SB_FORMAT_1) {
12904 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12905 case TG3_EEPROM_SB_REVISION_0:
12906 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12908 case TG3_EEPROM_SB_REVISION_2:
12909 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12911 case TG3_EEPROM_SB_REVISION_3:
12912 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12914 case TG3_EEPROM_SB_REVISION_4:
12915 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12917 case TG3_EEPROM_SB_REVISION_5:
12918 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12920 case TG3_EEPROM_SB_REVISION_6:
12921 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12928 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12929 size = NVRAM_SELFBOOT_HW_SIZE;
12933 buf = kmalloc(size, GFP_KERNEL);
12938 for (i = 0, j = 0; i < size; i += 4, j++) {
12939 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12946 /* Selfboot format */
12947 magic = be32_to_cpu(buf[0]);
12948 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12949 TG3_EEPROM_MAGIC_FW) {
12950 u8 *buf8 = (u8 *) buf, csum8 = 0;
12952 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12953 TG3_EEPROM_SB_REVISION_2) {
12954 /* For rev 2, the csum doesn't include the MBA. */
12955 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12957 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12960 for (i = 0; i < size; i++)
12973 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12974 TG3_EEPROM_MAGIC_HW) {
12975 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12976 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12977 u8 *buf8 = (u8 *) buf;
12979 /* Separate the parity bits and the data bytes. */
12980 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12981 if ((i == 0) || (i == 8)) {
12985 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12986 parity[k++] = buf8[i] & msk;
12988 } else if (i == 16) {
12992 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12993 parity[k++] = buf8[i] & msk;
12996 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12997 parity[k++] = buf8[i] & msk;
13000 data[j++] = buf8[i];
13004 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13005 u8 hw8 = hweight8(data[i]);
13007 if ((hw8 & 0x1) && parity[i])
13009 else if (!(hw8 & 0x1) && !parity[i])
13018 /* Bootstrap checksum at offset 0x10 */
13019 csum = calc_crc((unsigned char *) buf, 0x10);
13020 if (csum != le32_to_cpu(buf[0x10/4]))
13023 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13024 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13025 if (csum != le32_to_cpu(buf[0xfc/4]))
13030 buf = tg3_vpd_readblock(tp, &len);
13034 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13036 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13040 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13043 i += PCI_VPD_LRDT_TAG_SIZE;
13044 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13045 PCI_VPD_RO_KEYWORD_CHKSUM);
13049 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13051 for (i = 0; i <= j; i++)
13052 csum8 += ((u8 *)buf)[i];
13066 #define TG3_SERDES_TIMEOUT_SEC 2
13067 #define TG3_COPPER_TIMEOUT_SEC 6
13069 static int tg3_test_link(struct tg3 *tp)
13073 if (!netif_running(tp->dev))
13076 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13077 max = TG3_SERDES_TIMEOUT_SEC;
13079 max = TG3_COPPER_TIMEOUT_SEC;
13081 for (i = 0; i < max; i++) {
13085 if (msleep_interruptible(1000))
13092 /* Only test the commonly used registers */
13093 static int tg3_test_registers(struct tg3 *tp)
13095 int i, is_5705, is_5750;
13096 u32 offset, read_mask, write_mask, val, save_val, read_val;
13100 #define TG3_FL_5705 0x1
13101 #define TG3_FL_NOT_5705 0x2
13102 #define TG3_FL_NOT_5788 0x4
13103 #define TG3_FL_NOT_5750 0x8
13107 /* MAC Control Registers */
13108 { MAC_MODE, TG3_FL_NOT_5705,
13109 0x00000000, 0x00ef6f8c },
13110 { MAC_MODE, TG3_FL_5705,
13111 0x00000000, 0x01ef6b8c },
13112 { MAC_STATUS, TG3_FL_NOT_5705,
13113 0x03800107, 0x00000000 },
13114 { MAC_STATUS, TG3_FL_5705,
13115 0x03800100, 0x00000000 },
13116 { MAC_ADDR_0_HIGH, 0x0000,
13117 0x00000000, 0x0000ffff },
13118 { MAC_ADDR_0_LOW, 0x0000,
13119 0x00000000, 0xffffffff },
13120 { MAC_RX_MTU_SIZE, 0x0000,
13121 0x00000000, 0x0000ffff },
13122 { MAC_TX_MODE, 0x0000,
13123 0x00000000, 0x00000070 },
13124 { MAC_TX_LENGTHS, 0x0000,
13125 0x00000000, 0x00003fff },
13126 { MAC_RX_MODE, TG3_FL_NOT_5705,
13127 0x00000000, 0x000007fc },
13128 { MAC_RX_MODE, TG3_FL_5705,
13129 0x00000000, 0x000007dc },
13130 { MAC_HASH_REG_0, 0x0000,
13131 0x00000000, 0xffffffff },
13132 { MAC_HASH_REG_1, 0x0000,
13133 0x00000000, 0xffffffff },
13134 { MAC_HASH_REG_2, 0x0000,
13135 0x00000000, 0xffffffff },
13136 { MAC_HASH_REG_3, 0x0000,
13137 0x00000000, 0xffffffff },
13139 /* Receive Data and Receive BD Initiator Control Registers. */
13140 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13141 0x00000000, 0xffffffff },
13142 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13143 0x00000000, 0xffffffff },
13144 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13145 0x00000000, 0x00000003 },
13146 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13147 0x00000000, 0xffffffff },
13148 { RCVDBDI_STD_BD+0, 0x0000,
13149 0x00000000, 0xffffffff },
13150 { RCVDBDI_STD_BD+4, 0x0000,
13151 0x00000000, 0xffffffff },
13152 { RCVDBDI_STD_BD+8, 0x0000,
13153 0x00000000, 0xffff0002 },
13154 { RCVDBDI_STD_BD+0xc, 0x0000,
13155 0x00000000, 0xffffffff },
13157 /* Receive BD Initiator Control Registers. */
13158 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13159 0x00000000, 0xffffffff },
13160 { RCVBDI_STD_THRESH, TG3_FL_5705,
13161 0x00000000, 0x000003ff },
13162 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13163 0x00000000, 0xffffffff },
13165 /* Host Coalescing Control Registers. */
13166 { HOSTCC_MODE, TG3_FL_NOT_5705,
13167 0x00000000, 0x00000004 },
13168 { HOSTCC_MODE, TG3_FL_5705,
13169 0x00000000, 0x000000f6 },
13170 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13171 0x00000000, 0xffffffff },
13172 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13173 0x00000000, 0x000003ff },
13174 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13175 0x00000000, 0xffffffff },
13176 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13177 0x00000000, 0x000003ff },
13178 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13179 0x00000000, 0xffffffff },
13180 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13181 0x00000000, 0x000000ff },
13182 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13183 0x00000000, 0xffffffff },
13184 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13185 0x00000000, 0x000000ff },
13186 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13187 0x00000000, 0xffffffff },
13188 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13189 0x00000000, 0xffffffff },
13190 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13191 0x00000000, 0xffffffff },
13192 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13193 0x00000000, 0x000000ff },
13194 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13195 0x00000000, 0xffffffff },
13196 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13197 0x00000000, 0x000000ff },
13198 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13199 0x00000000, 0xffffffff },
13200 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13201 0x00000000, 0xffffffff },
13202 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13203 0x00000000, 0xffffffff },
13204 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13205 0x00000000, 0xffffffff },
13206 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13207 0x00000000, 0xffffffff },
13208 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13209 0xffffffff, 0x00000000 },
13210 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13211 0xffffffff, 0x00000000 },
13213 /* Buffer Manager Control Registers. */
13214 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13215 0x00000000, 0x007fff80 },
13216 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13217 0x00000000, 0x007fffff },
13218 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13219 0x00000000, 0x0000003f },
13220 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13221 0x00000000, 0x000001ff },
13222 { BUFMGR_MB_HIGH_WATER, 0x0000,
13223 0x00000000, 0x000001ff },
13224 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13225 0xffffffff, 0x00000000 },
13226 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13227 0xffffffff, 0x00000000 },
13229 /* Mailbox Registers */
13230 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13231 0x00000000, 0x000001ff },
13232 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13233 0x00000000, 0x000001ff },
13234 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13235 0x00000000, 0x000007ff },
13236 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13237 0x00000000, 0x000001ff },
13239 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13242 is_5705 = is_5750 = 0;
13243 if (tg3_flag(tp, 5705_PLUS)) {
13245 if (tg3_flag(tp, 5750_PLUS))
13249 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13250 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13253 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13256 if (tg3_flag(tp, IS_5788) &&
13257 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13260 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13263 offset = (u32) reg_tbl[i].offset;
13264 read_mask = reg_tbl[i].read_mask;
13265 write_mask = reg_tbl[i].write_mask;
13267 /* Save the original register content */
13268 save_val = tr32(offset);
13270 /* Determine the read-only value. */
13271 read_val = save_val & read_mask;
13273 /* Write zero to the register, then make sure the read-only bits
13274 * are not changed and the read/write bits are all zeros.
13278 val = tr32(offset);
13280 /* Test the read-only and read/write bits. */
13281 if (((val & read_mask) != read_val) || (val & write_mask))
13284 /* Write ones to all the bits defined by RdMask and WrMask, then
13285 * make sure the read-only bits are not changed and the
13286 * read/write bits are all ones.
13288 tw32(offset, read_mask | write_mask);
13290 val = tr32(offset);
13292 /* Test the read-only bits. */
13293 if ((val & read_mask) != read_val)
13296 /* Test the read/write bits. */
13297 if ((val & write_mask) != write_mask)
13300 tw32(offset, save_val);
13306 if (netif_msg_hw(tp))
13307 netdev_err(tp->dev,
13308 "Register test failed at offset %x\n", offset);
13309 tw32(offset, save_val);
13313 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13315 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13319 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13320 for (j = 0; j < len; j += 4) {
13323 tg3_write_mem(tp, offset + j, test_pattern[i]);
13324 tg3_read_mem(tp, offset + j, &val);
13325 if (val != test_pattern[i])
13332 static int tg3_test_memory(struct tg3 *tp)
13334 static struct mem_entry {
13337 } mem_tbl_570x[] = {
13338 { 0x00000000, 0x00b50},
13339 { 0x00002000, 0x1c000},
13340 { 0xffffffff, 0x00000}
13341 }, mem_tbl_5705[] = {
13342 { 0x00000100, 0x0000c},
13343 { 0x00000200, 0x00008},
13344 { 0x00004000, 0x00800},
13345 { 0x00006000, 0x01000},
13346 { 0x00008000, 0x02000},
13347 { 0x00010000, 0x0e000},
13348 { 0xffffffff, 0x00000}
13349 }, mem_tbl_5755[] = {
13350 { 0x00000200, 0x00008},
13351 { 0x00004000, 0x00800},
13352 { 0x00006000, 0x00800},
13353 { 0x00008000, 0x02000},
13354 { 0x00010000, 0x0c000},
13355 { 0xffffffff, 0x00000}
13356 }, mem_tbl_5906[] = {
13357 { 0x00000200, 0x00008},
13358 { 0x00004000, 0x00400},
13359 { 0x00006000, 0x00400},
13360 { 0x00008000, 0x01000},
13361 { 0x00010000, 0x01000},
13362 { 0xffffffff, 0x00000}
13363 }, mem_tbl_5717[] = {
13364 { 0x00000200, 0x00008},
13365 { 0x00010000, 0x0a000},
13366 { 0x00020000, 0x13c00},
13367 { 0xffffffff, 0x00000}
13368 }, mem_tbl_57765[] = {
13369 { 0x00000200, 0x00008},
13370 { 0x00004000, 0x00800},
13371 { 0x00006000, 0x09800},
13372 { 0x00010000, 0x0a000},
13373 { 0xffffffff, 0x00000}
13375 struct mem_entry *mem_tbl;
13379 if (tg3_flag(tp, 5717_PLUS))
13380 mem_tbl = mem_tbl_5717;
13381 else if (tg3_flag(tp, 57765_CLASS) ||
13382 tg3_asic_rev(tp) == ASIC_REV_5762)
13383 mem_tbl = mem_tbl_57765;
13384 else if (tg3_flag(tp, 5755_PLUS))
13385 mem_tbl = mem_tbl_5755;
13386 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13387 mem_tbl = mem_tbl_5906;
13388 else if (tg3_flag(tp, 5705_PLUS))
13389 mem_tbl = mem_tbl_5705;
13391 mem_tbl = mem_tbl_570x;
13393 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13394 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13402 #define TG3_TSO_MSS 500
13404 #define TG3_TSO_IP_HDR_LEN 20
13405 #define TG3_TSO_TCP_HDR_LEN 20
13406 #define TG3_TSO_TCP_OPT_LEN 12
13408 static const u8 tg3_tso_header[] = {
13410 0x45, 0x00, 0x00, 0x00,
13411 0x00, 0x00, 0x40, 0x00,
13412 0x40, 0x06, 0x00, 0x00,
13413 0x0a, 0x00, 0x00, 0x01,
13414 0x0a, 0x00, 0x00, 0x02,
13415 0x0d, 0x00, 0xe0, 0x00,
13416 0x00, 0x00, 0x01, 0x00,
13417 0x00, 0x00, 0x02, 0x00,
13418 0x80, 0x10, 0x10, 0x00,
13419 0x14, 0x09, 0x00, 0x00,
13420 0x01, 0x01, 0x08, 0x0a,
13421 0x11, 0x11, 0x11, 0x11,
13422 0x11, 0x11, 0x11, 0x11,
13425 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13427 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13428 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13430 struct sk_buff *skb;
13431 u8 *tx_data, *rx_data;
13433 int num_pkts, tx_len, rx_len, i, err;
13434 struct tg3_rx_buffer_desc *desc;
13435 struct tg3_napi *tnapi, *rnapi;
13436 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13438 tnapi = &tp->napi[0];
13439 rnapi = &tp->napi[0];
13440 if (tp->irq_cnt > 1) {
13441 if (tg3_flag(tp, ENABLE_RSS))
13442 rnapi = &tp->napi[1];
13443 if (tg3_flag(tp, ENABLE_TSS))
13444 tnapi = &tp->napi[1];
13446 coal_now = tnapi->coal_now | rnapi->coal_now;
13451 skb = netdev_alloc_skb(tp->dev, tx_len);
13455 tx_data = skb_put(skb, tx_len);
13456 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13457 memset(tx_data + ETH_ALEN, 0x0, 8);
13459 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13461 if (tso_loopback) {
13462 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13464 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13465 TG3_TSO_TCP_OPT_LEN;
13467 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13468 sizeof(tg3_tso_header));
13471 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13472 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13474 /* Set the total length field in the IP header */
13475 iph->tot_len = htons((u16)(mss + hdr_len));
13477 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13478 TXD_FLAG_CPU_POST_DMA);
13480 if (tg3_flag(tp, HW_TSO_1) ||
13481 tg3_flag(tp, HW_TSO_2) ||
13482 tg3_flag(tp, HW_TSO_3)) {
13484 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13485 th = (struct tcphdr *)&tx_data[val];
13488 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13490 if (tg3_flag(tp, HW_TSO_3)) {
13491 mss |= (hdr_len & 0xc) << 12;
13492 if (hdr_len & 0x10)
13493 base_flags |= 0x00000010;
13494 base_flags |= (hdr_len & 0x3e0) << 5;
13495 } else if (tg3_flag(tp, HW_TSO_2))
13496 mss |= hdr_len << 9;
13497 else if (tg3_flag(tp, HW_TSO_1) ||
13498 tg3_asic_rev(tp) == ASIC_REV_5705) {
13499 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13501 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13504 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13507 data_off = ETH_HLEN;
13509 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13510 tx_len > VLAN_ETH_FRAME_LEN)
13511 base_flags |= TXD_FLAG_JMB_PKT;
13514 for (i = data_off; i < tx_len; i++)
13515 tx_data[i] = (u8) (i & 0xff);
13517 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13518 if (pci_dma_mapping_error(tp->pdev, map)) {
13519 dev_kfree_skb(skb);
13523 val = tnapi->tx_prod;
13524 tnapi->tx_buffers[val].skb = skb;
13525 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13527 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13532 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13534 budget = tg3_tx_avail(tnapi);
13535 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13536 base_flags | TXD_FLAG_END, mss, 0)) {
13537 tnapi->tx_buffers[val].skb = NULL;
13538 dev_kfree_skb(skb);
13544 /* Sync BD data before updating mailbox */
13547 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13548 tr32_mailbox(tnapi->prodmbox);
13552 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13553 for (i = 0; i < 35; i++) {
13554 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13559 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13560 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13561 if ((tx_idx == tnapi->tx_prod) &&
13562 (rx_idx == (rx_start_idx + num_pkts)))
13566 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13567 dev_kfree_skb(skb);
13569 if (tx_idx != tnapi->tx_prod)
13572 if (rx_idx != rx_start_idx + num_pkts)
13576 while (rx_idx != rx_start_idx) {
13577 desc = &rnapi->rx_rcb[rx_start_idx++];
13578 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13579 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13581 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13582 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13585 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13588 if (!tso_loopback) {
13589 if (rx_len != tx_len)
13592 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13593 if (opaque_key != RXD_OPAQUE_RING_STD)
13596 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13599 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13600 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13601 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13605 if (opaque_key == RXD_OPAQUE_RING_STD) {
13606 rx_data = tpr->rx_std_buffers[desc_idx].data;
13607 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13609 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13610 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13611 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13616 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13617 PCI_DMA_FROMDEVICE);
13619 rx_data += TG3_RX_OFFSET(tp);
13620 for (i = data_off; i < rx_len; i++, val++) {
13621 if (*(rx_data + i) != (u8) (val & 0xff))
13628 /* tg3_free_rings will unmap and free the rx_data */
13633 #define TG3_STD_LOOPBACK_FAILED 1
13634 #define TG3_JMB_LOOPBACK_FAILED 2
13635 #define TG3_TSO_LOOPBACK_FAILED 4
13636 #define TG3_LOOPBACK_FAILED \
13637 (TG3_STD_LOOPBACK_FAILED | \
13638 TG3_JMB_LOOPBACK_FAILED | \
13639 TG3_TSO_LOOPBACK_FAILED)
13641 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13645 u32 jmb_pkt_sz = 9000;
13648 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13650 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13651 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13653 if (!netif_running(tp->dev)) {
13654 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13655 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13657 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13661 err = tg3_reset_hw(tp, true);
13663 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13664 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13666 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13670 if (tg3_flag(tp, ENABLE_RSS)) {
13673 /* Reroute all rx packets to the 1st queue */
13674 for (i = MAC_RSS_INDIR_TBL_0;
13675 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13679 /* HW errata - mac loopback fails in some cases on 5780.
13680 * Normal traffic and PHY loopback are not affected by
13681 * errata. Also, the MAC loopback test is deprecated for
13682 * all newer ASIC revisions.
13684 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13685 !tg3_flag(tp, CPMU_PRESENT)) {
13686 tg3_mac_loopback(tp, true);
13688 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13689 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13691 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13692 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13693 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13695 tg3_mac_loopback(tp, false);
13698 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13699 !tg3_flag(tp, USE_PHYLIB)) {
13702 tg3_phy_lpbk_set(tp, 0, false);
13704 /* Wait for link */
13705 for (i = 0; i < 100; i++) {
13706 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13711 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13712 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13713 if (tg3_flag(tp, TSO_CAPABLE) &&
13714 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13715 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13716 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13717 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13718 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13721 tg3_phy_lpbk_set(tp, 0, true);
13723 /* All link indications report up, but the hardware
13724 * isn't really ready for about 20 msec. Double it
13729 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13730 data[TG3_EXT_LOOPB_TEST] |=
13731 TG3_STD_LOOPBACK_FAILED;
13732 if (tg3_flag(tp, TSO_CAPABLE) &&
13733 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13734 data[TG3_EXT_LOOPB_TEST] |=
13735 TG3_TSO_LOOPBACK_FAILED;
13736 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13737 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13738 data[TG3_EXT_LOOPB_TEST] |=
13739 TG3_JMB_LOOPBACK_FAILED;
13742 /* Re-enable gphy autopowerdown. */
13743 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13744 tg3_phy_toggle_apd(tp, true);
13747 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13748 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13751 tp->phy_flags |= eee_cap;
13756 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13759 struct tg3 *tp = netdev_priv(dev);
13760 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13762 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13763 if (tg3_power_up(tp)) {
13764 etest->flags |= ETH_TEST_FL_FAILED;
13765 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13768 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13771 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13773 if (tg3_test_nvram(tp) != 0) {
13774 etest->flags |= ETH_TEST_FL_FAILED;
13775 data[TG3_NVRAM_TEST] = 1;
13777 if (!doextlpbk && tg3_test_link(tp)) {
13778 etest->flags |= ETH_TEST_FL_FAILED;
13779 data[TG3_LINK_TEST] = 1;
13781 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13782 int err, err2 = 0, irq_sync = 0;
13784 if (netif_running(dev)) {
13786 tg3_netif_stop(tp);
13790 tg3_full_lock(tp, irq_sync);
13791 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13792 err = tg3_nvram_lock(tp);
13793 tg3_halt_cpu(tp, RX_CPU_BASE);
13794 if (!tg3_flag(tp, 5705_PLUS))
13795 tg3_halt_cpu(tp, TX_CPU_BASE);
13797 tg3_nvram_unlock(tp);
13799 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13802 if (tg3_test_registers(tp) != 0) {
13803 etest->flags |= ETH_TEST_FL_FAILED;
13804 data[TG3_REGISTER_TEST] = 1;
13807 if (tg3_test_memory(tp) != 0) {
13808 etest->flags |= ETH_TEST_FL_FAILED;
13809 data[TG3_MEMORY_TEST] = 1;
13813 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13815 if (tg3_test_loopback(tp, data, doextlpbk))
13816 etest->flags |= ETH_TEST_FL_FAILED;
13818 tg3_full_unlock(tp);
13820 if (tg3_test_interrupt(tp) != 0) {
13821 etest->flags |= ETH_TEST_FL_FAILED;
13822 data[TG3_INTERRUPT_TEST] = 1;
13825 tg3_full_lock(tp, 0);
13827 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13828 if (netif_running(dev)) {
13829 tg3_flag_set(tp, INIT_COMPLETE);
13830 err2 = tg3_restart_hw(tp, true);
13832 tg3_netif_start(tp);
13835 tg3_full_unlock(tp);
13837 if (irq_sync && !err2)
13840 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13841 tg3_power_down_prepare(tp);
13845 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13847 struct tg3 *tp = netdev_priv(dev);
13848 struct hwtstamp_config stmpconf;
13850 if (!tg3_flag(tp, PTP_CAPABLE))
13851 return -EOPNOTSUPP;
13853 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13856 if (stmpconf.flags)
13859 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13860 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13863 switch (stmpconf.rx_filter) {
13864 case HWTSTAMP_FILTER_NONE:
13867 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13868 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13869 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13871 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13872 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13873 TG3_RX_PTP_CTL_SYNC_EVNT;
13875 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13876 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13877 TG3_RX_PTP_CTL_DELAY_REQ;
13879 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13880 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13881 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13883 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13884 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13885 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13887 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13888 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13889 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13891 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13892 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13893 TG3_RX_PTP_CTL_SYNC_EVNT;
13895 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13896 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13897 TG3_RX_PTP_CTL_SYNC_EVNT;
13899 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13900 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13901 TG3_RX_PTP_CTL_SYNC_EVNT;
13903 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13904 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13905 TG3_RX_PTP_CTL_DELAY_REQ;
13907 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13908 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13909 TG3_RX_PTP_CTL_DELAY_REQ;
13911 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13912 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13913 TG3_RX_PTP_CTL_DELAY_REQ;
13919 if (netif_running(dev) && tp->rxptpctl)
13920 tw32(TG3_RX_PTP_CTL,
13921 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13923 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13924 tg3_flag_set(tp, TX_TSTAMP_EN);
13926 tg3_flag_clear(tp, TX_TSTAMP_EN);
13928 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13932 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13934 struct tg3 *tp = netdev_priv(dev);
13935 struct hwtstamp_config stmpconf;
13937 if (!tg3_flag(tp, PTP_CAPABLE))
13938 return -EOPNOTSUPP;
13940 stmpconf.flags = 0;
13941 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13942 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13944 switch (tp->rxptpctl) {
13946 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13948 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13949 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13951 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13952 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13954 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13955 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13957 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13958 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13960 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13961 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13963 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13964 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13966 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13967 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13969 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13970 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13972 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13973 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13975 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13976 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13978 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13979 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13981 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13982 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13989 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13993 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13995 struct mii_ioctl_data *data = if_mii(ifr);
13996 struct tg3 *tp = netdev_priv(dev);
13999 if (tg3_flag(tp, USE_PHYLIB)) {
14000 struct phy_device *phydev;
14001 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14003 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14004 return phy_mii_ioctl(phydev, ifr, cmd);
14009 data->phy_id = tp->phy_addr;
14012 case SIOCGMIIREG: {
14015 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14016 break; /* We have no PHY */
14018 if (!netif_running(dev))
14021 spin_lock_bh(&tp->lock);
14022 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14023 data->reg_num & 0x1f, &mii_regval);
14024 spin_unlock_bh(&tp->lock);
14026 data->val_out = mii_regval;
14032 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14033 break; /* We have no PHY */
14035 if (!netif_running(dev))
14038 spin_lock_bh(&tp->lock);
14039 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14040 data->reg_num & 0x1f, data->val_in);
14041 spin_unlock_bh(&tp->lock);
14045 case SIOCSHWTSTAMP:
14046 return tg3_hwtstamp_set(dev, ifr);
14048 case SIOCGHWTSTAMP:
14049 return tg3_hwtstamp_get(dev, ifr);
14055 return -EOPNOTSUPP;
14058 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14060 struct tg3 *tp = netdev_priv(dev);
14062 memcpy(ec, &tp->coal, sizeof(*ec));
14066 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14068 struct tg3 *tp = netdev_priv(dev);
14069 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14070 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14072 if (!tg3_flag(tp, 5705_PLUS)) {
14073 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14074 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14075 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14076 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14079 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14080 (!ec->rx_coalesce_usecs) ||
14081 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14082 (!ec->tx_coalesce_usecs) ||
14083 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14084 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14085 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14086 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14087 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14088 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14089 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14090 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14093 /* Only copy relevant parameters, ignore all others. */
14094 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14095 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14096 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14097 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14098 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14099 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14100 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14101 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14102 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14104 if (netif_running(dev)) {
14105 tg3_full_lock(tp, 0);
14106 __tg3_set_coalesce(tp, &tp->coal);
14107 tg3_full_unlock(tp);
14112 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14114 struct tg3 *tp = netdev_priv(dev);
14116 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14117 netdev_warn(tp->dev, "Board does not support EEE!\n");
14118 return -EOPNOTSUPP;
14121 if (edata->advertised != tp->eee.advertised) {
14122 netdev_warn(tp->dev,
14123 "Direct manipulation of EEE advertisement is not supported\n");
14127 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14128 netdev_warn(tp->dev,
14129 "Maximal Tx Lpi timer supported is %#x(u)\n",
14130 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14136 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14137 tg3_warn_mgmt_link_flap(tp);
14139 if (netif_running(tp->dev)) {
14140 tg3_full_lock(tp, 0);
14143 tg3_full_unlock(tp);
14149 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14151 struct tg3 *tp = netdev_priv(dev);
14153 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14154 netdev_warn(tp->dev,
14155 "Board does not support EEE!\n");
14156 return -EOPNOTSUPP;
14163 static const struct ethtool_ops tg3_ethtool_ops = {
14164 .get_drvinfo = tg3_get_drvinfo,
14165 .get_regs_len = tg3_get_regs_len,
14166 .get_regs = tg3_get_regs,
14167 .get_wol = tg3_get_wol,
14168 .set_wol = tg3_set_wol,
14169 .get_msglevel = tg3_get_msglevel,
14170 .set_msglevel = tg3_set_msglevel,
14171 .nway_reset = tg3_nway_reset,
14172 .get_link = ethtool_op_get_link,
14173 .get_eeprom_len = tg3_get_eeprom_len,
14174 .get_eeprom = tg3_get_eeprom,
14175 .set_eeprom = tg3_set_eeprom,
14176 .get_ringparam = tg3_get_ringparam,
14177 .set_ringparam = tg3_set_ringparam,
14178 .get_pauseparam = tg3_get_pauseparam,
14179 .set_pauseparam = tg3_set_pauseparam,
14180 .self_test = tg3_self_test,
14181 .get_strings = tg3_get_strings,
14182 .set_phys_id = tg3_set_phys_id,
14183 .get_ethtool_stats = tg3_get_ethtool_stats,
14184 .get_coalesce = tg3_get_coalesce,
14185 .set_coalesce = tg3_set_coalesce,
14186 .get_sset_count = tg3_get_sset_count,
14187 .get_rxnfc = tg3_get_rxnfc,
14188 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14189 .get_rxfh = tg3_get_rxfh,
14190 .set_rxfh = tg3_set_rxfh,
14191 .get_channels = tg3_get_channels,
14192 .set_channels = tg3_set_channels,
14193 .get_ts_info = tg3_get_ts_info,
14194 .get_eee = tg3_get_eee,
14195 .set_eee = tg3_set_eee,
14196 .get_link_ksettings = tg3_get_link_ksettings,
14197 .set_link_ksettings = tg3_set_link_ksettings,
14200 static void tg3_get_stats64(struct net_device *dev,
14201 struct rtnl_link_stats64 *stats)
14203 struct tg3 *tp = netdev_priv(dev);
14205 spin_lock_bh(&tp->lock);
14206 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14207 *stats = tp->net_stats_prev;
14208 spin_unlock_bh(&tp->lock);
14212 tg3_get_nstats(tp, stats);
14213 spin_unlock_bh(&tp->lock);
14216 static void tg3_set_rx_mode(struct net_device *dev)
14218 struct tg3 *tp = netdev_priv(dev);
14220 if (!netif_running(dev))
14223 tg3_full_lock(tp, 0);
14224 __tg3_set_rx_mode(dev);
14225 tg3_full_unlock(tp);
14228 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14231 dev->mtu = new_mtu;
14233 if (new_mtu > ETH_DATA_LEN) {
14234 if (tg3_flag(tp, 5780_CLASS)) {
14235 netdev_update_features(dev);
14236 tg3_flag_clear(tp, TSO_CAPABLE);
14238 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14241 if (tg3_flag(tp, 5780_CLASS)) {
14242 tg3_flag_set(tp, TSO_CAPABLE);
14243 netdev_update_features(dev);
14245 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14249 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14251 struct tg3 *tp = netdev_priv(dev);
14253 bool reset_phy = false;
14255 if (!netif_running(dev)) {
14256 /* We'll just catch it later when the
14259 tg3_set_mtu(dev, tp, new_mtu);
14265 tg3_netif_stop(tp);
14267 tg3_set_mtu(dev, tp, new_mtu);
14269 tg3_full_lock(tp, 1);
14271 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14273 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14274 * breaks all requests to 256 bytes.
14276 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14277 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14278 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14279 tg3_asic_rev(tp) == ASIC_REV_5720)
14282 err = tg3_restart_hw(tp, reset_phy);
14285 tg3_netif_start(tp);
14287 tg3_full_unlock(tp);
14295 static const struct net_device_ops tg3_netdev_ops = {
14296 .ndo_open = tg3_open,
14297 .ndo_stop = tg3_close,
14298 .ndo_start_xmit = tg3_start_xmit,
14299 .ndo_get_stats64 = tg3_get_stats64,
14300 .ndo_validate_addr = eth_validate_addr,
14301 .ndo_set_rx_mode = tg3_set_rx_mode,
14302 .ndo_set_mac_address = tg3_set_mac_addr,
14303 .ndo_do_ioctl = tg3_ioctl,
14304 .ndo_tx_timeout = tg3_tx_timeout,
14305 .ndo_change_mtu = tg3_change_mtu,
14306 .ndo_fix_features = tg3_fix_features,
14307 .ndo_set_features = tg3_set_features,
14308 #ifdef CONFIG_NET_POLL_CONTROLLER
14309 .ndo_poll_controller = tg3_poll_controller,
14313 static void tg3_get_eeprom_size(struct tg3 *tp)
14315 u32 cursize, val, magic;
14317 tp->nvram_size = EEPROM_CHIP_SIZE;
14319 if (tg3_nvram_read(tp, 0, &magic) != 0)
14322 if ((magic != TG3_EEPROM_MAGIC) &&
14323 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14324 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14328 * Size the chip by reading offsets at increasing powers of two.
14329 * When we encounter our validation signature, we know the addressing
14330 * has wrapped around, and thus have our chip size.
14334 while (cursize < tp->nvram_size) {
14335 if (tg3_nvram_read(tp, cursize, &val) != 0)
14344 tp->nvram_size = cursize;
14347 static void tg3_get_nvram_size(struct tg3 *tp)
14351 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14354 /* Selfboot format */
14355 if (val != TG3_EEPROM_MAGIC) {
14356 tg3_get_eeprom_size(tp);
14360 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14362 /* This is confusing. We want to operate on the
14363 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14364 * call will read from NVRAM and byteswap the data
14365 * according to the byteswapping settings for all
14366 * other register accesses. This ensures the data we
14367 * want will always reside in the lower 16-bits.
14368 * However, the data in NVRAM is in LE format, which
14369 * means the data from the NVRAM read will always be
14370 * opposite the endianness of the CPU. The 16-bit
14371 * byteswap then brings the data to CPU endianness.
14373 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14377 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14380 static void tg3_get_nvram_info(struct tg3 *tp)
14384 nvcfg1 = tr32(NVRAM_CFG1);
14385 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14386 tg3_flag_set(tp, FLASH);
14388 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14389 tw32(NVRAM_CFG1, nvcfg1);
14392 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14393 tg3_flag(tp, 5780_CLASS)) {
14394 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14395 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14396 tp->nvram_jedecnum = JEDEC_ATMEL;
14397 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14398 tg3_flag_set(tp, NVRAM_BUFFERED);
14400 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14401 tp->nvram_jedecnum = JEDEC_ATMEL;
14402 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14404 case FLASH_VENDOR_ATMEL_EEPROM:
14405 tp->nvram_jedecnum = JEDEC_ATMEL;
14406 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14407 tg3_flag_set(tp, NVRAM_BUFFERED);
14409 case FLASH_VENDOR_ST:
14410 tp->nvram_jedecnum = JEDEC_ST;
14411 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14412 tg3_flag_set(tp, NVRAM_BUFFERED);
14414 case FLASH_VENDOR_SAIFUN:
14415 tp->nvram_jedecnum = JEDEC_SAIFUN;
14416 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14418 case FLASH_VENDOR_SST_SMALL:
14419 case FLASH_VENDOR_SST_LARGE:
14420 tp->nvram_jedecnum = JEDEC_SST;
14421 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14425 tp->nvram_jedecnum = JEDEC_ATMEL;
14426 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14427 tg3_flag_set(tp, NVRAM_BUFFERED);
14431 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14433 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14434 case FLASH_5752PAGE_SIZE_256:
14435 tp->nvram_pagesize = 256;
14437 case FLASH_5752PAGE_SIZE_512:
14438 tp->nvram_pagesize = 512;
14440 case FLASH_5752PAGE_SIZE_1K:
14441 tp->nvram_pagesize = 1024;
14443 case FLASH_5752PAGE_SIZE_2K:
14444 tp->nvram_pagesize = 2048;
14446 case FLASH_5752PAGE_SIZE_4K:
14447 tp->nvram_pagesize = 4096;
14449 case FLASH_5752PAGE_SIZE_264:
14450 tp->nvram_pagesize = 264;
14452 case FLASH_5752PAGE_SIZE_528:
14453 tp->nvram_pagesize = 528;
14458 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14462 nvcfg1 = tr32(NVRAM_CFG1);
14464 /* NVRAM protection for TPM */
14465 if (nvcfg1 & (1 << 27))
14466 tg3_flag_set(tp, PROTECTED_NVRAM);
14468 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14469 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14470 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14471 tp->nvram_jedecnum = JEDEC_ATMEL;
14472 tg3_flag_set(tp, NVRAM_BUFFERED);
14474 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14475 tp->nvram_jedecnum = JEDEC_ATMEL;
14476 tg3_flag_set(tp, NVRAM_BUFFERED);
14477 tg3_flag_set(tp, FLASH);
14479 case FLASH_5752VENDOR_ST_M45PE10:
14480 case FLASH_5752VENDOR_ST_M45PE20:
14481 case FLASH_5752VENDOR_ST_M45PE40:
14482 tp->nvram_jedecnum = JEDEC_ST;
14483 tg3_flag_set(tp, NVRAM_BUFFERED);
14484 tg3_flag_set(tp, FLASH);
14488 if (tg3_flag(tp, FLASH)) {
14489 tg3_nvram_get_pagesize(tp, nvcfg1);
14491 /* For eeprom, set pagesize to maximum eeprom size */
14492 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14494 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14495 tw32(NVRAM_CFG1, nvcfg1);
14499 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14501 u32 nvcfg1, protect = 0;
14503 nvcfg1 = tr32(NVRAM_CFG1);
14505 /* NVRAM protection for TPM */
14506 if (nvcfg1 & (1 << 27)) {
14507 tg3_flag_set(tp, PROTECTED_NVRAM);
14511 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14513 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14514 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14515 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14516 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14517 tp->nvram_jedecnum = JEDEC_ATMEL;
14518 tg3_flag_set(tp, NVRAM_BUFFERED);
14519 tg3_flag_set(tp, FLASH);
14520 tp->nvram_pagesize = 264;
14521 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14522 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14523 tp->nvram_size = (protect ? 0x3e200 :
14524 TG3_NVRAM_SIZE_512KB);
14525 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14526 tp->nvram_size = (protect ? 0x1f200 :
14527 TG3_NVRAM_SIZE_256KB);
14529 tp->nvram_size = (protect ? 0x1f200 :
14530 TG3_NVRAM_SIZE_128KB);
14532 case FLASH_5752VENDOR_ST_M45PE10:
14533 case FLASH_5752VENDOR_ST_M45PE20:
14534 case FLASH_5752VENDOR_ST_M45PE40:
14535 tp->nvram_jedecnum = JEDEC_ST;
14536 tg3_flag_set(tp, NVRAM_BUFFERED);
14537 tg3_flag_set(tp, FLASH);
14538 tp->nvram_pagesize = 256;
14539 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14540 tp->nvram_size = (protect ?
14541 TG3_NVRAM_SIZE_64KB :
14542 TG3_NVRAM_SIZE_128KB);
14543 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14544 tp->nvram_size = (protect ?
14545 TG3_NVRAM_SIZE_64KB :
14546 TG3_NVRAM_SIZE_256KB);
14548 tp->nvram_size = (protect ?
14549 TG3_NVRAM_SIZE_128KB :
14550 TG3_NVRAM_SIZE_512KB);
14555 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14559 nvcfg1 = tr32(NVRAM_CFG1);
14561 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14562 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14563 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14564 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14565 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14566 tp->nvram_jedecnum = JEDEC_ATMEL;
14567 tg3_flag_set(tp, NVRAM_BUFFERED);
14568 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14570 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14571 tw32(NVRAM_CFG1, nvcfg1);
14573 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14574 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14575 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14576 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14577 tp->nvram_jedecnum = JEDEC_ATMEL;
14578 tg3_flag_set(tp, NVRAM_BUFFERED);
14579 tg3_flag_set(tp, FLASH);
14580 tp->nvram_pagesize = 264;
14582 case FLASH_5752VENDOR_ST_M45PE10:
14583 case FLASH_5752VENDOR_ST_M45PE20:
14584 case FLASH_5752VENDOR_ST_M45PE40:
14585 tp->nvram_jedecnum = JEDEC_ST;
14586 tg3_flag_set(tp, NVRAM_BUFFERED);
14587 tg3_flag_set(tp, FLASH);
14588 tp->nvram_pagesize = 256;
14593 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14595 u32 nvcfg1, protect = 0;
14597 nvcfg1 = tr32(NVRAM_CFG1);
14599 /* NVRAM protection for TPM */
14600 if (nvcfg1 & (1 << 27)) {
14601 tg3_flag_set(tp, PROTECTED_NVRAM);
14605 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14607 case FLASH_5761VENDOR_ATMEL_ADB021D:
14608 case FLASH_5761VENDOR_ATMEL_ADB041D:
14609 case FLASH_5761VENDOR_ATMEL_ADB081D:
14610 case FLASH_5761VENDOR_ATMEL_ADB161D:
14611 case FLASH_5761VENDOR_ATMEL_MDB021D:
14612 case FLASH_5761VENDOR_ATMEL_MDB041D:
14613 case FLASH_5761VENDOR_ATMEL_MDB081D:
14614 case FLASH_5761VENDOR_ATMEL_MDB161D:
14615 tp->nvram_jedecnum = JEDEC_ATMEL;
14616 tg3_flag_set(tp, NVRAM_BUFFERED);
14617 tg3_flag_set(tp, FLASH);
14618 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14619 tp->nvram_pagesize = 256;
14621 case FLASH_5761VENDOR_ST_A_M45PE20:
14622 case FLASH_5761VENDOR_ST_A_M45PE40:
14623 case FLASH_5761VENDOR_ST_A_M45PE80:
14624 case FLASH_5761VENDOR_ST_A_M45PE16:
14625 case FLASH_5761VENDOR_ST_M_M45PE20:
14626 case FLASH_5761VENDOR_ST_M_M45PE40:
14627 case FLASH_5761VENDOR_ST_M_M45PE80:
14628 case FLASH_5761VENDOR_ST_M_M45PE16:
14629 tp->nvram_jedecnum = JEDEC_ST;
14630 tg3_flag_set(tp, NVRAM_BUFFERED);
14631 tg3_flag_set(tp, FLASH);
14632 tp->nvram_pagesize = 256;
14637 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14640 case FLASH_5761VENDOR_ATMEL_ADB161D:
14641 case FLASH_5761VENDOR_ATMEL_MDB161D:
14642 case FLASH_5761VENDOR_ST_A_M45PE16:
14643 case FLASH_5761VENDOR_ST_M_M45PE16:
14644 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14646 case FLASH_5761VENDOR_ATMEL_ADB081D:
14647 case FLASH_5761VENDOR_ATMEL_MDB081D:
14648 case FLASH_5761VENDOR_ST_A_M45PE80:
14649 case FLASH_5761VENDOR_ST_M_M45PE80:
14650 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14652 case FLASH_5761VENDOR_ATMEL_ADB041D:
14653 case FLASH_5761VENDOR_ATMEL_MDB041D:
14654 case FLASH_5761VENDOR_ST_A_M45PE40:
14655 case FLASH_5761VENDOR_ST_M_M45PE40:
14656 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14658 case FLASH_5761VENDOR_ATMEL_ADB021D:
14659 case FLASH_5761VENDOR_ATMEL_MDB021D:
14660 case FLASH_5761VENDOR_ST_A_M45PE20:
14661 case FLASH_5761VENDOR_ST_M_M45PE20:
14662 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14668 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14670 tp->nvram_jedecnum = JEDEC_ATMEL;
14671 tg3_flag_set(tp, NVRAM_BUFFERED);
14672 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14675 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14679 nvcfg1 = tr32(NVRAM_CFG1);
14681 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14682 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14683 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14684 tp->nvram_jedecnum = JEDEC_ATMEL;
14685 tg3_flag_set(tp, NVRAM_BUFFERED);
14686 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14688 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14689 tw32(NVRAM_CFG1, nvcfg1);
14691 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14692 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14693 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14694 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14695 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14696 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14697 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14698 tp->nvram_jedecnum = JEDEC_ATMEL;
14699 tg3_flag_set(tp, NVRAM_BUFFERED);
14700 tg3_flag_set(tp, FLASH);
14702 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14703 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14704 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14705 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14706 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14708 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14709 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14710 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14712 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14713 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14714 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14718 case FLASH_5752VENDOR_ST_M45PE10:
14719 case FLASH_5752VENDOR_ST_M45PE20:
14720 case FLASH_5752VENDOR_ST_M45PE40:
14721 tp->nvram_jedecnum = JEDEC_ST;
14722 tg3_flag_set(tp, NVRAM_BUFFERED);
14723 tg3_flag_set(tp, FLASH);
14725 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14726 case FLASH_5752VENDOR_ST_M45PE10:
14727 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14729 case FLASH_5752VENDOR_ST_M45PE20:
14730 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14732 case FLASH_5752VENDOR_ST_M45PE40:
14733 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14738 tg3_flag_set(tp, NO_NVRAM);
14742 tg3_nvram_get_pagesize(tp, nvcfg1);
14743 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14744 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14748 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14752 nvcfg1 = tr32(NVRAM_CFG1);
14754 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14755 case FLASH_5717VENDOR_ATMEL_EEPROM:
14756 case FLASH_5717VENDOR_MICRO_EEPROM:
14757 tp->nvram_jedecnum = JEDEC_ATMEL;
14758 tg3_flag_set(tp, NVRAM_BUFFERED);
14759 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14761 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14762 tw32(NVRAM_CFG1, nvcfg1);
14764 case FLASH_5717VENDOR_ATMEL_MDB011D:
14765 case FLASH_5717VENDOR_ATMEL_ADB011B:
14766 case FLASH_5717VENDOR_ATMEL_ADB011D:
14767 case FLASH_5717VENDOR_ATMEL_MDB021D:
14768 case FLASH_5717VENDOR_ATMEL_ADB021B:
14769 case FLASH_5717VENDOR_ATMEL_ADB021D:
14770 case FLASH_5717VENDOR_ATMEL_45USPT:
14771 tp->nvram_jedecnum = JEDEC_ATMEL;
14772 tg3_flag_set(tp, NVRAM_BUFFERED);
14773 tg3_flag_set(tp, FLASH);
14775 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14776 case FLASH_5717VENDOR_ATMEL_MDB021D:
14777 /* Detect size with tg3_nvram_get_size() */
14779 case FLASH_5717VENDOR_ATMEL_ADB021B:
14780 case FLASH_5717VENDOR_ATMEL_ADB021D:
14781 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14784 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14788 case FLASH_5717VENDOR_ST_M_M25PE10:
14789 case FLASH_5717VENDOR_ST_A_M25PE10:
14790 case FLASH_5717VENDOR_ST_M_M45PE10:
14791 case FLASH_5717VENDOR_ST_A_M45PE10:
14792 case FLASH_5717VENDOR_ST_M_M25PE20:
14793 case FLASH_5717VENDOR_ST_A_M25PE20:
14794 case FLASH_5717VENDOR_ST_M_M45PE20:
14795 case FLASH_5717VENDOR_ST_A_M45PE20:
14796 case FLASH_5717VENDOR_ST_25USPT:
14797 case FLASH_5717VENDOR_ST_45USPT:
14798 tp->nvram_jedecnum = JEDEC_ST;
14799 tg3_flag_set(tp, NVRAM_BUFFERED);
14800 tg3_flag_set(tp, FLASH);
14802 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14803 case FLASH_5717VENDOR_ST_M_M25PE20:
14804 case FLASH_5717VENDOR_ST_M_M45PE20:
14805 /* Detect size with tg3_nvram_get_size() */
14807 case FLASH_5717VENDOR_ST_A_M25PE20:
14808 case FLASH_5717VENDOR_ST_A_M45PE20:
14809 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14812 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14817 tg3_flag_set(tp, NO_NVRAM);
14821 tg3_nvram_get_pagesize(tp, nvcfg1);
14822 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14823 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14826 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14828 u32 nvcfg1, nvmpinstrp, nv_status;
14830 nvcfg1 = tr32(NVRAM_CFG1);
14831 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14833 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14834 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14835 tg3_flag_set(tp, NO_NVRAM);
14839 switch (nvmpinstrp) {
14840 case FLASH_5762_MX25L_100:
14841 case FLASH_5762_MX25L_200:
14842 case FLASH_5762_MX25L_400:
14843 case FLASH_5762_MX25L_800:
14844 case FLASH_5762_MX25L_160_320:
14845 tp->nvram_pagesize = 4096;
14846 tp->nvram_jedecnum = JEDEC_MACRONIX;
14847 tg3_flag_set(tp, NVRAM_BUFFERED);
14848 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14849 tg3_flag_set(tp, FLASH);
14850 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14852 (1 << (nv_status >> AUTOSENSE_DEVID &
14853 AUTOSENSE_DEVID_MASK)
14854 << AUTOSENSE_SIZE_IN_MB);
14857 case FLASH_5762_EEPROM_HD:
14858 nvmpinstrp = FLASH_5720_EEPROM_HD;
14860 case FLASH_5762_EEPROM_LD:
14861 nvmpinstrp = FLASH_5720_EEPROM_LD;
14863 case FLASH_5720VENDOR_M_ST_M45PE20:
14864 /* This pinstrap supports multiple sizes, so force it
14865 * to read the actual size from location 0xf0.
14867 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14872 switch (nvmpinstrp) {
14873 case FLASH_5720_EEPROM_HD:
14874 case FLASH_5720_EEPROM_LD:
14875 tp->nvram_jedecnum = JEDEC_ATMEL;
14876 tg3_flag_set(tp, NVRAM_BUFFERED);
14878 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14879 tw32(NVRAM_CFG1, nvcfg1);
14880 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14881 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14883 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14885 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14886 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14887 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14888 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14889 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14890 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14891 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14892 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14893 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14894 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14895 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14896 case FLASH_5720VENDOR_ATMEL_45USPT:
14897 tp->nvram_jedecnum = JEDEC_ATMEL;
14898 tg3_flag_set(tp, NVRAM_BUFFERED);
14899 tg3_flag_set(tp, FLASH);
14901 switch (nvmpinstrp) {
14902 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14903 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14904 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14905 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14907 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14908 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14909 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14910 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14912 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14913 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14914 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14917 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14918 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14922 case FLASH_5720VENDOR_M_ST_M25PE10:
14923 case FLASH_5720VENDOR_M_ST_M45PE10:
14924 case FLASH_5720VENDOR_A_ST_M25PE10:
14925 case FLASH_5720VENDOR_A_ST_M45PE10:
14926 case FLASH_5720VENDOR_M_ST_M25PE20:
14927 case FLASH_5720VENDOR_M_ST_M45PE20:
14928 case FLASH_5720VENDOR_A_ST_M25PE20:
14929 case FLASH_5720VENDOR_A_ST_M45PE20:
14930 case FLASH_5720VENDOR_M_ST_M25PE40:
14931 case FLASH_5720VENDOR_M_ST_M45PE40:
14932 case FLASH_5720VENDOR_A_ST_M25PE40:
14933 case FLASH_5720VENDOR_A_ST_M45PE40:
14934 case FLASH_5720VENDOR_M_ST_M25PE80:
14935 case FLASH_5720VENDOR_M_ST_M45PE80:
14936 case FLASH_5720VENDOR_A_ST_M25PE80:
14937 case FLASH_5720VENDOR_A_ST_M45PE80:
14938 case FLASH_5720VENDOR_ST_25USPT:
14939 case FLASH_5720VENDOR_ST_45USPT:
14940 tp->nvram_jedecnum = JEDEC_ST;
14941 tg3_flag_set(tp, NVRAM_BUFFERED);
14942 tg3_flag_set(tp, FLASH);
14944 switch (nvmpinstrp) {
14945 case FLASH_5720VENDOR_M_ST_M25PE20:
14946 case FLASH_5720VENDOR_M_ST_M45PE20:
14947 case FLASH_5720VENDOR_A_ST_M25PE20:
14948 case FLASH_5720VENDOR_A_ST_M45PE20:
14949 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14951 case FLASH_5720VENDOR_M_ST_M25PE40:
14952 case FLASH_5720VENDOR_M_ST_M45PE40:
14953 case FLASH_5720VENDOR_A_ST_M25PE40:
14954 case FLASH_5720VENDOR_A_ST_M45PE40:
14955 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14957 case FLASH_5720VENDOR_M_ST_M25PE80:
14958 case FLASH_5720VENDOR_M_ST_M45PE80:
14959 case FLASH_5720VENDOR_A_ST_M25PE80:
14960 case FLASH_5720VENDOR_A_ST_M45PE80:
14961 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14964 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14965 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14970 tg3_flag_set(tp, NO_NVRAM);
14974 tg3_nvram_get_pagesize(tp, nvcfg1);
14975 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14976 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14978 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14981 if (tg3_nvram_read(tp, 0, &val))
14984 if (val != TG3_EEPROM_MAGIC &&
14985 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14986 tg3_flag_set(tp, NO_NVRAM);
14990 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14991 static void tg3_nvram_init(struct tg3 *tp)
14993 if (tg3_flag(tp, IS_SSB_CORE)) {
14994 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14995 tg3_flag_clear(tp, NVRAM);
14996 tg3_flag_clear(tp, NVRAM_BUFFERED);
14997 tg3_flag_set(tp, NO_NVRAM);
15001 tw32_f(GRC_EEPROM_ADDR,
15002 (EEPROM_ADDR_FSM_RESET |
15003 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15004 EEPROM_ADDR_CLKPERD_SHIFT)));
15008 /* Enable seeprom accesses. */
15009 tw32_f(GRC_LOCAL_CTRL,
15010 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15013 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15014 tg3_asic_rev(tp) != ASIC_REV_5701) {
15015 tg3_flag_set(tp, NVRAM);
15017 if (tg3_nvram_lock(tp)) {
15018 netdev_warn(tp->dev,
15019 "Cannot get nvram lock, %s failed\n",
15023 tg3_enable_nvram_access(tp);
15025 tp->nvram_size = 0;
15027 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15028 tg3_get_5752_nvram_info(tp);
15029 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15030 tg3_get_5755_nvram_info(tp);
15031 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15032 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15033 tg3_asic_rev(tp) == ASIC_REV_5785)
15034 tg3_get_5787_nvram_info(tp);
15035 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15036 tg3_get_5761_nvram_info(tp);
15037 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15038 tg3_get_5906_nvram_info(tp);
15039 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15040 tg3_flag(tp, 57765_CLASS))
15041 tg3_get_57780_nvram_info(tp);
15042 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15043 tg3_asic_rev(tp) == ASIC_REV_5719)
15044 tg3_get_5717_nvram_info(tp);
15045 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15046 tg3_asic_rev(tp) == ASIC_REV_5762)
15047 tg3_get_5720_nvram_info(tp);
15049 tg3_get_nvram_info(tp);
15051 if (tp->nvram_size == 0)
15052 tg3_get_nvram_size(tp);
15054 tg3_disable_nvram_access(tp);
15055 tg3_nvram_unlock(tp);
15058 tg3_flag_clear(tp, NVRAM);
15059 tg3_flag_clear(tp, NVRAM_BUFFERED);
15061 tg3_get_eeprom_size(tp);
15065 struct subsys_tbl_ent {
15066 u16 subsys_vendor, subsys_devid;
15070 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15071 /* Broadcom boards. */
15072 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15073 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15074 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15075 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15076 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15077 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15078 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15079 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15080 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15081 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15082 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15083 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15084 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15085 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15086 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15087 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15088 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15089 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15090 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15091 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15092 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15093 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15096 { TG3PCI_SUBVENDOR_ID_3COM,
15097 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15098 { TG3PCI_SUBVENDOR_ID_3COM,
15099 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15100 { TG3PCI_SUBVENDOR_ID_3COM,
15101 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15102 { TG3PCI_SUBVENDOR_ID_3COM,
15103 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15104 { TG3PCI_SUBVENDOR_ID_3COM,
15105 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15108 { TG3PCI_SUBVENDOR_ID_DELL,
15109 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15110 { TG3PCI_SUBVENDOR_ID_DELL,
15111 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15112 { TG3PCI_SUBVENDOR_ID_DELL,
15113 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15114 { TG3PCI_SUBVENDOR_ID_DELL,
15115 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15117 /* Compaq boards. */
15118 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15119 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15120 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15121 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15122 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15123 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15124 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15125 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15126 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15127 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15130 { TG3PCI_SUBVENDOR_ID_IBM,
15131 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15134 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15138 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15139 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15140 tp->pdev->subsystem_vendor) &&
15141 (subsys_id_to_phy_id[i].subsys_devid ==
15142 tp->pdev->subsystem_device))
15143 return &subsys_id_to_phy_id[i];
15148 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15152 tp->phy_id = TG3_PHY_ID_INVALID;
15153 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15155 /* Assume an onboard device and WOL capable by default. */
15156 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15157 tg3_flag_set(tp, WOL_CAP);
15159 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15160 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15161 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15162 tg3_flag_set(tp, IS_NIC);
15164 val = tr32(VCPU_CFGSHDW);
15165 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15166 tg3_flag_set(tp, ASPM_WORKAROUND);
15167 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15168 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15169 tg3_flag_set(tp, WOL_ENABLE);
15170 device_set_wakeup_enable(&tp->pdev->dev, true);
15175 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15176 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15177 u32 nic_cfg, led_cfg;
15178 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15179 u32 nic_phy_id, ver, eeprom_phy_id;
15180 int eeprom_phy_serdes = 0;
15182 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15183 tp->nic_sram_data_cfg = nic_cfg;
15185 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15186 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15187 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15188 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15189 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15190 (ver > 0) && (ver < 0x100))
15191 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15193 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15194 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15196 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15197 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15198 tg3_asic_rev(tp) == ASIC_REV_5720)
15199 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15201 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15202 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15203 eeprom_phy_serdes = 1;
15205 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15206 if (nic_phy_id != 0) {
15207 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15208 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15210 eeprom_phy_id = (id1 >> 16) << 10;
15211 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15212 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15216 tp->phy_id = eeprom_phy_id;
15217 if (eeprom_phy_serdes) {
15218 if (!tg3_flag(tp, 5705_PLUS))
15219 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15221 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15224 if (tg3_flag(tp, 5750_PLUS))
15225 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15226 SHASTA_EXT_LED_MODE_MASK);
15228 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15232 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15233 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15236 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15237 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15240 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15241 tp->led_ctrl = LED_CTRL_MODE_MAC;
15243 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15244 * read on some older 5700/5701 bootcode.
15246 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15247 tg3_asic_rev(tp) == ASIC_REV_5701)
15248 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15252 case SHASTA_EXT_LED_SHARED:
15253 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15254 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15255 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15256 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15257 LED_CTRL_MODE_PHY_2);
15259 if (tg3_flag(tp, 5717_PLUS) ||
15260 tg3_asic_rev(tp) == ASIC_REV_5762)
15261 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15262 LED_CTRL_BLINK_RATE_MASK;
15266 case SHASTA_EXT_LED_MAC:
15267 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15270 case SHASTA_EXT_LED_COMBO:
15271 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15272 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15273 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15274 LED_CTRL_MODE_PHY_2);
15279 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15280 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15281 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15282 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15284 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15285 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15287 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15288 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15289 if ((tp->pdev->subsystem_vendor ==
15290 PCI_VENDOR_ID_ARIMA) &&
15291 (tp->pdev->subsystem_device == 0x205a ||
15292 tp->pdev->subsystem_device == 0x2063))
15293 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15295 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15296 tg3_flag_set(tp, IS_NIC);
15299 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15300 tg3_flag_set(tp, ENABLE_ASF);
15301 if (tg3_flag(tp, 5750_PLUS))
15302 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15305 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15306 tg3_flag(tp, 5750_PLUS))
15307 tg3_flag_set(tp, ENABLE_APE);
15309 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15310 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15311 tg3_flag_clear(tp, WOL_CAP);
15313 if (tg3_flag(tp, WOL_CAP) &&
15314 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15315 tg3_flag_set(tp, WOL_ENABLE);
15316 device_set_wakeup_enable(&tp->pdev->dev, true);
15319 if (cfg2 & (1 << 17))
15320 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15322 /* serdes signal pre-emphasis in register 0x590 set by */
15323 /* bootcode if bit 18 is set */
15324 if (cfg2 & (1 << 18))
15325 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15327 if ((tg3_flag(tp, 57765_PLUS) ||
15328 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15329 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15330 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15331 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15333 if (tg3_flag(tp, PCI_EXPRESS)) {
15336 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15337 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15338 !tg3_flag(tp, 57765_PLUS) &&
15339 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15340 tg3_flag_set(tp, ASPM_WORKAROUND);
15341 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15342 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15343 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15344 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15347 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15348 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15349 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15350 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15351 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15352 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15354 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15355 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15358 if (tg3_flag(tp, WOL_CAP))
15359 device_set_wakeup_enable(&tp->pdev->dev,
15360 tg3_flag(tp, WOL_ENABLE));
15362 device_set_wakeup_capable(&tp->pdev->dev, false);
15365 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15368 u32 val2, off = offset * 8;
15370 err = tg3_nvram_lock(tp);
15374 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15375 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15376 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15377 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15380 for (i = 0; i < 100; i++) {
15381 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15382 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15383 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15389 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15391 tg3_nvram_unlock(tp);
15392 if (val2 & APE_OTP_STATUS_CMD_DONE)
15398 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15403 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15404 tw32(OTP_CTRL, cmd);
15406 /* Wait for up to 1 ms for command to execute. */
15407 for (i = 0; i < 100; i++) {
15408 val = tr32(OTP_STATUS);
15409 if (val & OTP_STATUS_CMD_DONE)
15414 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15417 /* Read the gphy configuration from the OTP region of the chip. The gphy
15418 * configuration is a 32-bit value that straddles the alignment boundary.
15419 * We do two 32-bit reads and then shift and merge the results.
15421 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15423 u32 bhalf_otp, thalf_otp;
15425 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15427 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15430 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15432 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15435 thalf_otp = tr32(OTP_READ_DATA);
15437 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15439 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15442 bhalf_otp = tr32(OTP_READ_DATA);
15444 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15447 static void tg3_phy_init_link_config(struct tg3 *tp)
15449 u32 adv = ADVERTISED_Autoneg;
15451 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15452 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15453 adv |= ADVERTISED_1000baseT_Half;
15454 adv |= ADVERTISED_1000baseT_Full;
15457 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15458 adv |= ADVERTISED_100baseT_Half |
15459 ADVERTISED_100baseT_Full |
15460 ADVERTISED_10baseT_Half |
15461 ADVERTISED_10baseT_Full |
15464 adv |= ADVERTISED_FIBRE;
15466 tp->link_config.advertising = adv;
15467 tp->link_config.speed = SPEED_UNKNOWN;
15468 tp->link_config.duplex = DUPLEX_UNKNOWN;
15469 tp->link_config.autoneg = AUTONEG_ENABLE;
15470 tp->link_config.active_speed = SPEED_UNKNOWN;
15471 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15476 static int tg3_phy_probe(struct tg3 *tp)
15478 u32 hw_phy_id_1, hw_phy_id_2;
15479 u32 hw_phy_id, hw_phy_id_masked;
15482 /* flow control autonegotiation is default behavior */
15483 tg3_flag_set(tp, PAUSE_AUTONEG);
15484 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15486 if (tg3_flag(tp, ENABLE_APE)) {
15487 switch (tp->pci_fn) {
15489 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15492 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15495 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15498 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15503 if (!tg3_flag(tp, ENABLE_ASF) &&
15504 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15505 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15506 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15507 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15509 if (tg3_flag(tp, USE_PHYLIB))
15510 return tg3_phy_init(tp);
15512 /* Reading the PHY ID register can conflict with ASF
15513 * firmware access to the PHY hardware.
15516 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15517 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15519 /* Now read the physical PHY_ID from the chip and verify
15520 * that it is sane. If it doesn't look good, we fall back
15521 * to either the hard-coded table based PHY_ID and failing
15522 * that the value found in the eeprom area.
15524 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15525 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15527 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15528 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15529 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15531 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15534 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15535 tp->phy_id = hw_phy_id;
15536 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15537 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15539 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15541 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15542 /* Do nothing, phy ID already set up in
15543 * tg3_get_eeprom_hw_cfg().
15546 struct subsys_tbl_ent *p;
15548 /* No eeprom signature? Try the hardcoded
15549 * subsys device table.
15551 p = tg3_lookup_by_subsys(tp);
15553 tp->phy_id = p->phy_id;
15554 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15555 /* For now we saw the IDs 0xbc050cd0,
15556 * 0xbc050f80 and 0xbc050c30 on devices
15557 * connected to an BCM4785 and there are
15558 * probably more. Just assume that the phy is
15559 * supported when it is connected to a SSB core
15566 tp->phy_id == TG3_PHY_ID_BCM8002)
15567 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15571 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15572 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15573 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15574 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15575 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15576 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15577 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15578 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15579 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15580 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15582 tp->eee.supported = SUPPORTED_100baseT_Full |
15583 SUPPORTED_1000baseT_Full;
15584 tp->eee.advertised = ADVERTISED_100baseT_Full |
15585 ADVERTISED_1000baseT_Full;
15586 tp->eee.eee_enabled = 1;
15587 tp->eee.tx_lpi_enabled = 1;
15588 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15591 tg3_phy_init_link_config(tp);
15593 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15594 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15595 !tg3_flag(tp, ENABLE_APE) &&
15596 !tg3_flag(tp, ENABLE_ASF)) {
15599 tg3_readphy(tp, MII_BMSR, &bmsr);
15600 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15601 (bmsr & BMSR_LSTATUS))
15602 goto skip_phy_reset;
15604 err = tg3_phy_reset(tp);
15608 tg3_phy_set_wirespeed(tp);
15610 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15611 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15612 tp->link_config.flowctrl);
15614 tg3_writephy(tp, MII_BMCR,
15615 BMCR_ANENABLE | BMCR_ANRESTART);
15620 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15621 err = tg3_init_5401phy_dsp(tp);
15625 err = tg3_init_5401phy_dsp(tp);
15631 static void tg3_read_vpd(struct tg3 *tp)
15634 unsigned int block_end, rosize, len;
15638 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15642 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15644 goto out_not_found;
15646 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15647 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15648 i += PCI_VPD_LRDT_TAG_SIZE;
15650 if (block_end > vpdlen)
15651 goto out_not_found;
15653 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15654 PCI_VPD_RO_KEYWORD_MFR_ID);
15656 len = pci_vpd_info_field_size(&vpd_data[j]);
15658 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15659 if (j + len > block_end || len != 4 ||
15660 memcmp(&vpd_data[j], "1028", 4))
15663 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15664 PCI_VPD_RO_KEYWORD_VENDOR0);
15668 len = pci_vpd_info_field_size(&vpd_data[j]);
15670 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15671 if (j + len > block_end)
15674 if (len >= sizeof(tp->fw_ver))
15675 len = sizeof(tp->fw_ver) - 1;
15676 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15677 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15682 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15683 PCI_VPD_RO_KEYWORD_PARTNO);
15685 goto out_not_found;
15687 len = pci_vpd_info_field_size(&vpd_data[i]);
15689 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15690 if (len > TG3_BPN_SIZE ||
15691 (len + i) > vpdlen)
15692 goto out_not_found;
15694 memcpy(tp->board_part_number, &vpd_data[i], len);
15698 if (tp->board_part_number[0])
15702 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15703 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15704 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15705 strcpy(tp->board_part_number, "BCM5717");
15706 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15707 strcpy(tp->board_part_number, "BCM5718");
15710 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15711 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15712 strcpy(tp->board_part_number, "BCM57780");
15713 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15714 strcpy(tp->board_part_number, "BCM57760");
15715 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15716 strcpy(tp->board_part_number, "BCM57790");
15717 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15718 strcpy(tp->board_part_number, "BCM57788");
15721 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15722 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15723 strcpy(tp->board_part_number, "BCM57761");
15724 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15725 strcpy(tp->board_part_number, "BCM57765");
15726 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15727 strcpy(tp->board_part_number, "BCM57781");
15728 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15729 strcpy(tp->board_part_number, "BCM57785");
15730 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15731 strcpy(tp->board_part_number, "BCM57791");
15732 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15733 strcpy(tp->board_part_number, "BCM57795");
15736 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15737 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15738 strcpy(tp->board_part_number, "BCM57762");
15739 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15740 strcpy(tp->board_part_number, "BCM57766");
15741 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15742 strcpy(tp->board_part_number, "BCM57782");
15743 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15744 strcpy(tp->board_part_number, "BCM57786");
15747 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15748 strcpy(tp->board_part_number, "BCM95906");
15751 strcpy(tp->board_part_number, "none");
15755 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15759 if (tg3_nvram_read(tp, offset, &val) ||
15760 (val & 0xfc000000) != 0x0c000000 ||
15761 tg3_nvram_read(tp, offset + 4, &val) ||
15768 static void tg3_read_bc_ver(struct tg3 *tp)
15770 u32 val, offset, start, ver_offset;
15772 bool newver = false;
15774 if (tg3_nvram_read(tp, 0xc, &offset) ||
15775 tg3_nvram_read(tp, 0x4, &start))
15778 offset = tg3_nvram_logical_addr(tp, offset);
15780 if (tg3_nvram_read(tp, offset, &val))
15783 if ((val & 0xfc000000) == 0x0c000000) {
15784 if (tg3_nvram_read(tp, offset + 4, &val))
15791 dst_off = strlen(tp->fw_ver);
15794 if (TG3_VER_SIZE - dst_off < 16 ||
15795 tg3_nvram_read(tp, offset + 8, &ver_offset))
15798 offset = offset + ver_offset - start;
15799 for (i = 0; i < 16; i += 4) {
15801 if (tg3_nvram_read_be32(tp, offset + i, &v))
15804 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15809 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15812 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15813 TG3_NVM_BCVER_MAJSFT;
15814 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15815 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15816 "v%d.%02d", major, minor);
15820 static void tg3_read_hwsb_ver(struct tg3 *tp)
15822 u32 val, major, minor;
15824 /* Use native endian representation */
15825 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15828 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15829 TG3_NVM_HWSB_CFG1_MAJSFT;
15830 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15831 TG3_NVM_HWSB_CFG1_MINSFT;
15833 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15836 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15838 u32 offset, major, minor, build;
15840 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15842 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15845 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15846 case TG3_EEPROM_SB_REVISION_0:
15847 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15849 case TG3_EEPROM_SB_REVISION_2:
15850 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15852 case TG3_EEPROM_SB_REVISION_3:
15853 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15855 case TG3_EEPROM_SB_REVISION_4:
15856 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15858 case TG3_EEPROM_SB_REVISION_5:
15859 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15861 case TG3_EEPROM_SB_REVISION_6:
15862 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15868 if (tg3_nvram_read(tp, offset, &val))
15871 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15872 TG3_EEPROM_SB_EDH_BLD_SHFT;
15873 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15874 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15875 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15877 if (minor > 99 || build > 26)
15880 offset = strlen(tp->fw_ver);
15881 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15882 " v%d.%02d", major, minor);
15885 offset = strlen(tp->fw_ver);
15886 if (offset < TG3_VER_SIZE - 1)
15887 tp->fw_ver[offset] = 'a' + build - 1;
15891 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15893 u32 val, offset, start;
15896 for (offset = TG3_NVM_DIR_START;
15897 offset < TG3_NVM_DIR_END;
15898 offset += TG3_NVM_DIRENT_SIZE) {
15899 if (tg3_nvram_read(tp, offset, &val))
15902 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15906 if (offset == TG3_NVM_DIR_END)
15909 if (!tg3_flag(tp, 5705_PLUS))
15910 start = 0x08000000;
15911 else if (tg3_nvram_read(tp, offset - 4, &start))
15914 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15915 !tg3_fw_img_is_valid(tp, offset) ||
15916 tg3_nvram_read(tp, offset + 8, &val))
15919 offset += val - start;
15921 vlen = strlen(tp->fw_ver);
15923 tp->fw_ver[vlen++] = ',';
15924 tp->fw_ver[vlen++] = ' ';
15926 for (i = 0; i < 4; i++) {
15928 if (tg3_nvram_read_be32(tp, offset, &v))
15931 offset += sizeof(v);
15933 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15934 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15938 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15943 static void tg3_probe_ncsi(struct tg3 *tp)
15947 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15948 if (apedata != APE_SEG_SIG_MAGIC)
15951 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15952 if (!(apedata & APE_FW_STATUS_READY))
15955 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15956 tg3_flag_set(tp, APE_HAS_NCSI);
15959 static void tg3_read_dash_ver(struct tg3 *tp)
15965 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15967 if (tg3_flag(tp, APE_HAS_NCSI))
15969 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15974 vlen = strlen(tp->fw_ver);
15976 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15978 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15979 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15980 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15981 (apedata & APE_FW_VERSION_BLDMSK));
15984 static void tg3_read_otp_ver(struct tg3 *tp)
15988 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15991 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15992 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15993 TG3_OTP_MAGIC0_VALID(val)) {
15994 u64 val64 = (u64) val << 32 | val2;
15998 for (i = 0; i < 7; i++) {
15999 if ((val64 & 0xff) == 0)
16001 ver = val64 & 0xff;
16004 vlen = strlen(tp->fw_ver);
16005 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16009 static void tg3_read_fw_ver(struct tg3 *tp)
16012 bool vpd_vers = false;
16014 if (tp->fw_ver[0] != 0)
16017 if (tg3_flag(tp, NO_NVRAM)) {
16018 strcat(tp->fw_ver, "sb");
16019 tg3_read_otp_ver(tp);
16023 if (tg3_nvram_read(tp, 0, &val))
16026 if (val == TG3_EEPROM_MAGIC)
16027 tg3_read_bc_ver(tp);
16028 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16029 tg3_read_sb_ver(tp, val);
16030 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16031 tg3_read_hwsb_ver(tp);
16033 if (tg3_flag(tp, ENABLE_ASF)) {
16034 if (tg3_flag(tp, ENABLE_APE)) {
16035 tg3_probe_ncsi(tp);
16037 tg3_read_dash_ver(tp);
16038 } else if (!vpd_vers) {
16039 tg3_read_mgmtfw_ver(tp);
16043 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16046 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16048 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16049 return TG3_RX_RET_MAX_SIZE_5717;
16050 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16051 return TG3_RX_RET_MAX_SIZE_5700;
16053 return TG3_RX_RET_MAX_SIZE_5705;
16056 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16057 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16058 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16059 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16063 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16065 struct pci_dev *peer;
16066 unsigned int func, devnr = tp->pdev->devfn & ~7;
16068 for (func = 0; func < 8; func++) {
16069 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16070 if (peer && peer != tp->pdev)
16074 /* 5704 can be configured in single-port mode, set peer to
16075 * tp->pdev in that case.
16083 * We don't need to keep the refcount elevated; there's no way
16084 * to remove one half of this device without removing the other
16091 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16093 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16094 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16097 /* All devices that use the alternate
16098 * ASIC REV location have a CPMU.
16100 tg3_flag_set(tp, CPMU_PRESENT);
16102 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16103 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16104 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16105 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16106 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16107 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16108 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16109 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16110 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16111 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16112 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16113 reg = TG3PCI_GEN2_PRODID_ASICREV;
16114 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16115 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16116 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16117 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16118 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16119 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16120 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16121 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16122 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16123 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16124 reg = TG3PCI_GEN15_PRODID_ASICREV;
16126 reg = TG3PCI_PRODID_ASICREV;
16128 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16131 /* Wrong chip ID in 5752 A0. This code can be removed later
16132 * as A0 is not in production.
16134 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16135 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16137 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16138 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16140 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16141 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16142 tg3_asic_rev(tp) == ASIC_REV_5720)
16143 tg3_flag_set(tp, 5717_PLUS);
16145 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16146 tg3_asic_rev(tp) == ASIC_REV_57766)
16147 tg3_flag_set(tp, 57765_CLASS);
16149 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16150 tg3_asic_rev(tp) == ASIC_REV_5762)
16151 tg3_flag_set(tp, 57765_PLUS);
16153 /* Intentionally exclude ASIC_REV_5906 */
16154 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16155 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16156 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16157 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16158 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16159 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16160 tg3_flag(tp, 57765_PLUS))
16161 tg3_flag_set(tp, 5755_PLUS);
16163 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16164 tg3_asic_rev(tp) == ASIC_REV_5714)
16165 tg3_flag_set(tp, 5780_CLASS);
16167 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16168 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16169 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16170 tg3_flag(tp, 5755_PLUS) ||
16171 tg3_flag(tp, 5780_CLASS))
16172 tg3_flag_set(tp, 5750_PLUS);
16174 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16175 tg3_flag(tp, 5750_PLUS))
16176 tg3_flag_set(tp, 5705_PLUS);
16179 static bool tg3_10_100_only_device(struct tg3 *tp,
16180 const struct pci_device_id *ent)
16182 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16184 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16185 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16186 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16189 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16190 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16191 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16201 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16204 u32 pci_state_reg, grc_misc_cfg;
16209 /* Force memory write invalidate off. If we leave it on,
16210 * then on 5700_BX chips we have to enable a workaround.
16211 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16212 * to match the cacheline size. The Broadcom driver have this
16213 * workaround but turns MWI off all the times so never uses
16214 * it. This seems to suggest that the workaround is insufficient.
16216 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16217 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16218 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16220 /* Important! -- Make sure register accesses are byteswapped
16221 * correctly. Also, for those chips that require it, make
16222 * sure that indirect register accesses are enabled before
16223 * the first operation.
16225 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16227 tp->misc_host_ctrl |= (misc_ctrl_reg &
16228 MISC_HOST_CTRL_CHIPREV);
16229 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16230 tp->misc_host_ctrl);
16232 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16234 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16235 * we need to disable memory and use config. cycles
16236 * only to access all registers. The 5702/03 chips
16237 * can mistakenly decode the special cycles from the
16238 * ICH chipsets as memory write cycles, causing corruption
16239 * of register and memory space. Only certain ICH bridges
16240 * will drive special cycles with non-zero data during the
16241 * address phase which can fall within the 5703's address
16242 * range. This is not an ICH bug as the PCI spec allows
16243 * non-zero address during special cycles. However, only
16244 * these ICH bridges are known to drive non-zero addresses
16245 * during special cycles.
16247 * Since special cycles do not cross PCI bridges, we only
16248 * enable this workaround if the 5703 is on the secondary
16249 * bus of these ICH bridges.
16251 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16252 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16253 static struct tg3_dev_id {
16257 } ich_chipsets[] = {
16258 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16260 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16262 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16264 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16268 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16269 struct pci_dev *bridge = NULL;
16271 while (pci_id->vendor != 0) {
16272 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16278 if (pci_id->rev != PCI_ANY_ID) {
16279 if (bridge->revision > pci_id->rev)
16282 if (bridge->subordinate &&
16283 (bridge->subordinate->number ==
16284 tp->pdev->bus->number)) {
16285 tg3_flag_set(tp, ICH_WORKAROUND);
16286 pci_dev_put(bridge);
16292 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16293 static struct tg3_dev_id {
16296 } bridge_chipsets[] = {
16297 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16298 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16301 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16302 struct pci_dev *bridge = NULL;
16304 while (pci_id->vendor != 0) {
16305 bridge = pci_get_device(pci_id->vendor,
16312 if (bridge->subordinate &&
16313 (bridge->subordinate->number <=
16314 tp->pdev->bus->number) &&
16315 (bridge->subordinate->busn_res.end >=
16316 tp->pdev->bus->number)) {
16317 tg3_flag_set(tp, 5701_DMA_BUG);
16318 pci_dev_put(bridge);
16324 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16325 * DMA addresses > 40-bit. This bridge may have other additional
16326 * 57xx devices behind it in some 4-port NIC designs for example.
16327 * Any tg3 device found behind the bridge will also need the 40-bit
16330 if (tg3_flag(tp, 5780_CLASS)) {
16331 tg3_flag_set(tp, 40BIT_DMA_BUG);
16332 tp->msi_cap = tp->pdev->msi_cap;
16334 struct pci_dev *bridge = NULL;
16337 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16338 PCI_DEVICE_ID_SERVERWORKS_EPB,
16340 if (bridge && bridge->subordinate &&
16341 (bridge->subordinate->number <=
16342 tp->pdev->bus->number) &&
16343 (bridge->subordinate->busn_res.end >=
16344 tp->pdev->bus->number)) {
16345 tg3_flag_set(tp, 40BIT_DMA_BUG);
16346 pci_dev_put(bridge);
16352 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16353 tg3_asic_rev(tp) == ASIC_REV_5714)
16354 tp->pdev_peer = tg3_find_peer(tp);
16356 /* Determine TSO capabilities */
16357 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16358 ; /* Do nothing. HW bug. */
16359 else if (tg3_flag(tp, 57765_PLUS))
16360 tg3_flag_set(tp, HW_TSO_3);
16361 else if (tg3_flag(tp, 5755_PLUS) ||
16362 tg3_asic_rev(tp) == ASIC_REV_5906)
16363 tg3_flag_set(tp, HW_TSO_2);
16364 else if (tg3_flag(tp, 5750_PLUS)) {
16365 tg3_flag_set(tp, HW_TSO_1);
16366 tg3_flag_set(tp, TSO_BUG);
16367 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16368 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16369 tg3_flag_clear(tp, TSO_BUG);
16370 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16371 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16372 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16373 tg3_flag_set(tp, FW_TSO);
16374 tg3_flag_set(tp, TSO_BUG);
16375 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16376 tp->fw_needed = FIRMWARE_TG3TSO5;
16378 tp->fw_needed = FIRMWARE_TG3TSO;
16381 /* Selectively allow TSO based on operating conditions */
16382 if (tg3_flag(tp, HW_TSO_1) ||
16383 tg3_flag(tp, HW_TSO_2) ||
16384 tg3_flag(tp, HW_TSO_3) ||
16385 tg3_flag(tp, FW_TSO)) {
16386 /* For firmware TSO, assume ASF is disabled.
16387 * We'll disable TSO later if we discover ASF
16388 * is enabled in tg3_get_eeprom_hw_cfg().
16390 tg3_flag_set(tp, TSO_CAPABLE);
16392 tg3_flag_clear(tp, TSO_CAPABLE);
16393 tg3_flag_clear(tp, TSO_BUG);
16394 tp->fw_needed = NULL;
16397 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16398 tp->fw_needed = FIRMWARE_TG3;
16400 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16401 tp->fw_needed = FIRMWARE_TG357766;
16405 if (tg3_flag(tp, 5750_PLUS)) {
16406 tg3_flag_set(tp, SUPPORT_MSI);
16407 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16408 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16409 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16410 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16411 tp->pdev_peer == tp->pdev))
16412 tg3_flag_clear(tp, SUPPORT_MSI);
16414 if (tg3_flag(tp, 5755_PLUS) ||
16415 tg3_asic_rev(tp) == ASIC_REV_5906) {
16416 tg3_flag_set(tp, 1SHOT_MSI);
16419 if (tg3_flag(tp, 57765_PLUS)) {
16420 tg3_flag_set(tp, SUPPORT_MSIX);
16421 tp->irq_max = TG3_IRQ_MAX_VECS;
16427 if (tp->irq_max > 1) {
16428 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16429 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16431 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16432 tg3_asic_rev(tp) == ASIC_REV_5720)
16433 tp->txq_max = tp->irq_max - 1;
16436 if (tg3_flag(tp, 5755_PLUS) ||
16437 tg3_asic_rev(tp) == ASIC_REV_5906)
16438 tg3_flag_set(tp, SHORT_DMA_BUG);
16440 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16441 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16443 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16444 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16445 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16446 tg3_asic_rev(tp) == ASIC_REV_5762)
16447 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16449 if (tg3_flag(tp, 57765_PLUS) &&
16450 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16451 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16453 if (!tg3_flag(tp, 5705_PLUS) ||
16454 tg3_flag(tp, 5780_CLASS) ||
16455 tg3_flag(tp, USE_JUMBO_BDFLAG))
16456 tg3_flag_set(tp, JUMBO_CAPABLE);
16458 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16461 if (pci_is_pcie(tp->pdev)) {
16464 tg3_flag_set(tp, PCI_EXPRESS);
16466 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16467 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16468 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16469 tg3_flag_clear(tp, HW_TSO_2);
16470 tg3_flag_clear(tp, TSO_CAPABLE);
16472 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16473 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16474 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16475 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16476 tg3_flag_set(tp, CLKREQ_BUG);
16477 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16478 tg3_flag_set(tp, L1PLLPD_EN);
16480 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16481 /* BCM5785 devices are effectively PCIe devices, and should
16482 * follow PCIe codepaths, but do not have a PCIe capabilities
16485 tg3_flag_set(tp, PCI_EXPRESS);
16486 } else if (!tg3_flag(tp, 5705_PLUS) ||
16487 tg3_flag(tp, 5780_CLASS)) {
16488 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16489 if (!tp->pcix_cap) {
16490 dev_err(&tp->pdev->dev,
16491 "Cannot find PCI-X capability, aborting\n");
16495 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16496 tg3_flag_set(tp, PCIX_MODE);
16499 /* If we have an AMD 762 or VIA K8T800 chipset, write
16500 * reordering to the mailbox registers done by the host
16501 * controller can cause major troubles. We read back from
16502 * every mailbox register write to force the writes to be
16503 * posted to the chip in order.
16505 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16506 !tg3_flag(tp, PCI_EXPRESS))
16507 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16509 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16510 &tp->pci_cacheline_sz);
16511 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16512 &tp->pci_lat_timer);
16513 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16514 tp->pci_lat_timer < 64) {
16515 tp->pci_lat_timer = 64;
16516 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16517 tp->pci_lat_timer);
16520 /* Important! -- It is critical that the PCI-X hw workaround
16521 * situation is decided before the first MMIO register access.
16523 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16524 /* 5700 BX chips need to have their TX producer index
16525 * mailboxes written twice to workaround a bug.
16527 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16529 /* If we are in PCI-X mode, enable register write workaround.
16531 * The workaround is to use indirect register accesses
16532 * for all chip writes not to mailbox registers.
16534 if (tg3_flag(tp, PCIX_MODE)) {
16537 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16539 /* The chip can have it's power management PCI config
16540 * space registers clobbered due to this bug.
16541 * So explicitly force the chip into D0 here.
16543 pci_read_config_dword(tp->pdev,
16544 tp->pdev->pm_cap + PCI_PM_CTRL,
16546 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16547 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16548 pci_write_config_dword(tp->pdev,
16549 tp->pdev->pm_cap + PCI_PM_CTRL,
16552 /* Also, force SERR#/PERR# in PCI command. */
16553 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16554 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16555 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16559 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16560 tg3_flag_set(tp, PCI_HIGH_SPEED);
16561 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16562 tg3_flag_set(tp, PCI_32BIT);
16564 /* Chip-specific fixup from Broadcom driver */
16565 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16566 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16567 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16568 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16571 /* Default fast path register access methods */
16572 tp->read32 = tg3_read32;
16573 tp->write32 = tg3_write32;
16574 tp->read32_mbox = tg3_read32;
16575 tp->write32_mbox = tg3_write32;
16576 tp->write32_tx_mbox = tg3_write32;
16577 tp->write32_rx_mbox = tg3_write32;
16579 /* Various workaround register access methods */
16580 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16581 tp->write32 = tg3_write_indirect_reg32;
16582 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16583 (tg3_flag(tp, PCI_EXPRESS) &&
16584 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16586 * Back to back register writes can cause problems on these
16587 * chips, the workaround is to read back all reg writes
16588 * except those to mailbox regs.
16590 * See tg3_write_indirect_reg32().
16592 tp->write32 = tg3_write_flush_reg32;
16595 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16596 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16597 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16598 tp->write32_rx_mbox = tg3_write_flush_reg32;
16601 if (tg3_flag(tp, ICH_WORKAROUND)) {
16602 tp->read32 = tg3_read_indirect_reg32;
16603 tp->write32 = tg3_write_indirect_reg32;
16604 tp->read32_mbox = tg3_read_indirect_mbox;
16605 tp->write32_mbox = tg3_write_indirect_mbox;
16606 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16607 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16612 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16613 pci_cmd &= ~PCI_COMMAND_MEMORY;
16614 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16616 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16617 tp->read32_mbox = tg3_read32_mbox_5906;
16618 tp->write32_mbox = tg3_write32_mbox_5906;
16619 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16620 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16623 if (tp->write32 == tg3_write_indirect_reg32 ||
16624 (tg3_flag(tp, PCIX_MODE) &&
16625 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16626 tg3_asic_rev(tp) == ASIC_REV_5701)))
16627 tg3_flag_set(tp, SRAM_USE_CONFIG);
16629 /* The memory arbiter has to be enabled in order for SRAM accesses
16630 * to succeed. Normally on powerup the tg3 chip firmware will make
16631 * sure it is enabled, but other entities such as system netboot
16632 * code might disable it.
16634 val = tr32(MEMARB_MODE);
16635 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16637 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16638 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16639 tg3_flag(tp, 5780_CLASS)) {
16640 if (tg3_flag(tp, PCIX_MODE)) {
16641 pci_read_config_dword(tp->pdev,
16642 tp->pcix_cap + PCI_X_STATUS,
16644 tp->pci_fn = val & 0x7;
16646 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16647 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16648 tg3_asic_rev(tp) == ASIC_REV_5720) {
16649 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16650 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16651 val = tr32(TG3_CPMU_STATUS);
16653 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16654 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16656 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16657 TG3_CPMU_STATUS_FSHFT_5719;
16660 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16661 tp->write32_tx_mbox = tg3_write_flush_reg32;
16662 tp->write32_rx_mbox = tg3_write_flush_reg32;
16665 /* Get eeprom hw config before calling tg3_set_power_state().
16666 * In particular, the TG3_FLAG_IS_NIC flag must be
16667 * determined before calling tg3_set_power_state() so that
16668 * we know whether or not to switch out of Vaux power.
16669 * When the flag is set, it means that GPIO1 is used for eeprom
16670 * write protect and also implies that it is a LOM where GPIOs
16671 * are not used to switch power.
16673 tg3_get_eeprom_hw_cfg(tp);
16675 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16676 tg3_flag_clear(tp, TSO_CAPABLE);
16677 tg3_flag_clear(tp, TSO_BUG);
16678 tp->fw_needed = NULL;
16681 if (tg3_flag(tp, ENABLE_APE)) {
16682 /* Allow reads and writes to the
16683 * APE register and memory space.
16685 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16686 PCISTATE_ALLOW_APE_SHMEM_WR |
16687 PCISTATE_ALLOW_APE_PSPACE_WR;
16688 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16691 tg3_ape_lock_init(tp);
16692 tp->ape_hb_interval =
16693 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16696 /* Set up tp->grc_local_ctrl before calling
16697 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16698 * will bring 5700's external PHY out of reset.
16699 * It is also used as eeprom write protect on LOMs.
16701 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16702 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16703 tg3_flag(tp, EEPROM_WRITE_PROT))
16704 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16705 GRC_LCLCTRL_GPIO_OUTPUT1);
16706 /* Unused GPIO3 must be driven as output on 5752 because there
16707 * are no pull-up resistors on unused GPIO pins.
16709 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16710 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16712 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16713 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16714 tg3_flag(tp, 57765_CLASS))
16715 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16717 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16718 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16719 /* Turn off the debug UART. */
16720 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16721 if (tg3_flag(tp, IS_NIC))
16722 /* Keep VMain power. */
16723 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16724 GRC_LCLCTRL_GPIO_OUTPUT0;
16727 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16728 tp->grc_local_ctrl |=
16729 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16731 /* Switch out of Vaux if it is a NIC */
16732 tg3_pwrsrc_switch_to_vmain(tp);
16734 /* Derive initial jumbo mode from MTU assigned in
16735 * ether_setup() via the alloc_etherdev() call
16737 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16738 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16740 /* Determine WakeOnLan speed to use. */
16741 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16742 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16743 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16744 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16745 tg3_flag_clear(tp, WOL_SPEED_100MB);
16747 tg3_flag_set(tp, WOL_SPEED_100MB);
16750 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16751 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16753 /* A few boards don't want Ethernet@WireSpeed phy feature */
16754 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16755 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16756 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16757 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16758 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16759 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16760 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16762 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16763 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16764 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16765 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16766 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16768 if (tg3_flag(tp, 5705_PLUS) &&
16769 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16770 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16771 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16772 !tg3_flag(tp, 57765_PLUS)) {
16773 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16774 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16775 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16776 tg3_asic_rev(tp) == ASIC_REV_5761) {
16777 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16778 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16779 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16780 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16781 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16783 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16786 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16787 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16788 tp->phy_otp = tg3_read_otp_phycfg(tp);
16789 if (tp->phy_otp == 0)
16790 tp->phy_otp = TG3_OTP_DEFAULT;
16793 if (tg3_flag(tp, CPMU_PRESENT))
16794 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16796 tp->mi_mode = MAC_MI_MODE_BASE;
16798 tp->coalesce_mode = 0;
16799 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16800 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16801 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16803 /* Set these bits to enable statistics workaround. */
16804 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16805 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16806 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16807 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16808 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16809 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16812 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16813 tg3_asic_rev(tp) == ASIC_REV_57780)
16814 tg3_flag_set(tp, USE_PHYLIB);
16816 err = tg3_mdio_init(tp);
16820 /* Initialize data/descriptor byte/word swapping. */
16821 val = tr32(GRC_MODE);
16822 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16823 tg3_asic_rev(tp) == ASIC_REV_5762)
16824 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16825 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16826 GRC_MODE_B2HRX_ENABLE |
16827 GRC_MODE_HTX2B_ENABLE |
16828 GRC_MODE_HOST_STACKUP);
16830 val &= GRC_MODE_HOST_STACKUP;
16832 tw32(GRC_MODE, val | tp->grc_mode);
16834 tg3_switch_clocks(tp);
16836 /* Clear this out for sanity. */
16837 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16839 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16840 tw32(TG3PCI_REG_BASE_ADDR, 0);
16842 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16844 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16845 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16846 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16847 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16848 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16849 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16850 void __iomem *sram_base;
16852 /* Write some dummy words into the SRAM status block
16853 * area, see if it reads back correctly. If the return
16854 * value is bad, force enable the PCIX workaround.
16856 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16858 writel(0x00000000, sram_base);
16859 writel(0x00000000, sram_base + 4);
16860 writel(0xffffffff, sram_base + 4);
16861 if (readl(sram_base) != 0x00000000)
16862 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16867 tg3_nvram_init(tp);
16869 /* If the device has an NVRAM, no need to load patch firmware */
16870 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16871 !tg3_flag(tp, NO_NVRAM))
16872 tp->fw_needed = NULL;
16874 grc_misc_cfg = tr32(GRC_MISC_CFG);
16875 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16877 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16878 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16879 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16880 tg3_flag_set(tp, IS_5788);
16882 if (!tg3_flag(tp, IS_5788) &&
16883 tg3_asic_rev(tp) != ASIC_REV_5700)
16884 tg3_flag_set(tp, TAGGED_STATUS);
16885 if (tg3_flag(tp, TAGGED_STATUS)) {
16886 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16887 HOSTCC_MODE_CLRTICK_TXBD);
16889 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16890 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16891 tp->misc_host_ctrl);
16894 /* Preserve the APE MAC_MODE bits */
16895 if (tg3_flag(tp, ENABLE_APE))
16896 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16900 if (tg3_10_100_only_device(tp, ent))
16901 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16903 err = tg3_phy_probe(tp);
16905 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16906 /* ... but do not return immediately ... */
16911 tg3_read_fw_ver(tp);
16913 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16914 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16916 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16917 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16919 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16922 /* 5700 {AX,BX} chips have a broken status block link
16923 * change bit implementation, so we must use the
16924 * status register in those cases.
16926 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16927 tg3_flag_set(tp, USE_LINKCHG_REG);
16929 tg3_flag_clear(tp, USE_LINKCHG_REG);
16931 /* The led_ctrl is set during tg3_phy_probe, here we might
16932 * have to force the link status polling mechanism based
16933 * upon subsystem IDs.
16935 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16936 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16937 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16938 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16939 tg3_flag_set(tp, USE_LINKCHG_REG);
16942 /* For all SERDES we poll the MAC status register. */
16943 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16944 tg3_flag_set(tp, POLL_SERDES);
16946 tg3_flag_clear(tp, POLL_SERDES);
16948 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16949 tg3_flag_set(tp, POLL_CPMU_LINK);
16951 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16952 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16953 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16954 tg3_flag(tp, PCIX_MODE)) {
16955 tp->rx_offset = NET_SKB_PAD;
16956 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16957 tp->rx_copy_thresh = ~(u16)0;
16961 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16962 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16963 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16965 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16967 /* Increment the rx prod index on the rx std ring by at most
16968 * 8 for these chips to workaround hw errata.
16970 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16971 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16972 tg3_asic_rev(tp) == ASIC_REV_5755)
16973 tp->rx_std_max_post = 8;
16975 if (tg3_flag(tp, ASPM_WORKAROUND))
16976 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16977 PCIE_PWR_MGMT_L1_THRESH_MSK;
16982 static int tg3_get_device_address(struct tg3 *tp)
16984 struct net_device *dev = tp->dev;
16985 u32 hi, lo, mac_offset;
16989 if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
16992 if (tg3_flag(tp, IS_SSB_CORE)) {
16993 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16994 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16999 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17000 tg3_flag(tp, 5780_CLASS)) {
17001 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17003 if (tg3_nvram_lock(tp))
17004 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17006 tg3_nvram_unlock(tp);
17007 } else if (tg3_flag(tp, 5717_PLUS)) {
17008 if (tp->pci_fn & 1)
17010 if (tp->pci_fn > 1)
17011 mac_offset += 0x18c;
17012 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17015 /* First try to get it from MAC address mailbox. */
17016 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17017 if ((hi >> 16) == 0x484b) {
17018 dev->dev_addr[0] = (hi >> 8) & 0xff;
17019 dev->dev_addr[1] = (hi >> 0) & 0xff;
17021 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17022 dev->dev_addr[2] = (lo >> 24) & 0xff;
17023 dev->dev_addr[3] = (lo >> 16) & 0xff;
17024 dev->dev_addr[4] = (lo >> 8) & 0xff;
17025 dev->dev_addr[5] = (lo >> 0) & 0xff;
17027 /* Some old bootcode may report a 0 MAC address in SRAM */
17028 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17031 /* Next, try NVRAM. */
17032 if (!tg3_flag(tp, NO_NVRAM) &&
17033 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17034 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17035 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17036 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17038 /* Finally just fetch it out of the MAC control regs. */
17040 hi = tr32(MAC_ADDR_0_HIGH);
17041 lo = tr32(MAC_ADDR_0_LOW);
17043 dev->dev_addr[5] = lo & 0xff;
17044 dev->dev_addr[4] = (lo >> 8) & 0xff;
17045 dev->dev_addr[3] = (lo >> 16) & 0xff;
17046 dev->dev_addr[2] = (lo >> 24) & 0xff;
17047 dev->dev_addr[1] = hi & 0xff;
17048 dev->dev_addr[0] = (hi >> 8) & 0xff;
17052 if (!is_valid_ether_addr(&dev->dev_addr[0]))
17057 #define BOUNDARY_SINGLE_CACHELINE 1
17058 #define BOUNDARY_MULTI_CACHELINE 2
17060 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17062 int cacheline_size;
17066 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17068 cacheline_size = 1024;
17070 cacheline_size = (int) byte * 4;
17072 /* On 5703 and later chips, the boundary bits have no
17075 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17076 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17077 !tg3_flag(tp, PCI_EXPRESS))
17080 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17081 goal = BOUNDARY_MULTI_CACHELINE;
17083 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17084 goal = BOUNDARY_SINGLE_CACHELINE;
17090 if (tg3_flag(tp, 57765_PLUS)) {
17091 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17098 /* PCI controllers on most RISC systems tend to disconnect
17099 * when a device tries to burst across a cache-line boundary.
17100 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17102 * Unfortunately, for PCI-E there are only limited
17103 * write-side controls for this, and thus for reads
17104 * we will still get the disconnects. We'll also waste
17105 * these PCI cycles for both read and write for chips
17106 * other than 5700 and 5701 which do not implement the
17109 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17110 switch (cacheline_size) {
17115 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17116 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17117 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17119 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17120 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17125 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17126 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17130 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17131 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17134 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17135 switch (cacheline_size) {
17139 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17140 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17141 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17147 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17148 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17152 switch (cacheline_size) {
17154 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17155 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17156 DMA_RWCTRL_WRITE_BNDRY_16);
17161 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17162 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17163 DMA_RWCTRL_WRITE_BNDRY_32);
17168 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17169 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17170 DMA_RWCTRL_WRITE_BNDRY_64);
17175 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17176 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17177 DMA_RWCTRL_WRITE_BNDRY_128);
17182 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17183 DMA_RWCTRL_WRITE_BNDRY_256);
17186 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17187 DMA_RWCTRL_WRITE_BNDRY_512);
17191 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17192 DMA_RWCTRL_WRITE_BNDRY_1024);
17201 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17202 int size, bool to_device)
17204 struct tg3_internal_buffer_desc test_desc;
17205 u32 sram_dma_descs;
17208 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17210 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17211 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17212 tw32(RDMAC_STATUS, 0);
17213 tw32(WDMAC_STATUS, 0);
17215 tw32(BUFMGR_MODE, 0);
17216 tw32(FTQ_RESET, 0);
17218 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17219 test_desc.addr_lo = buf_dma & 0xffffffff;
17220 test_desc.nic_mbuf = 0x00002100;
17221 test_desc.len = size;
17224 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17225 * the *second* time the tg3 driver was getting loaded after an
17228 * Broadcom tells me:
17229 * ...the DMA engine is connected to the GRC block and a DMA
17230 * reset may affect the GRC block in some unpredictable way...
17231 * The behavior of resets to individual blocks has not been tested.
17233 * Broadcom noted the GRC reset will also reset all sub-components.
17236 test_desc.cqid_sqid = (13 << 8) | 2;
17238 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17241 test_desc.cqid_sqid = (16 << 8) | 7;
17243 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17246 test_desc.flags = 0x00000005;
17248 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17251 val = *(((u32 *)&test_desc) + i);
17252 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17253 sram_dma_descs + (i * sizeof(u32)));
17254 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17256 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17259 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17261 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17264 for (i = 0; i < 40; i++) {
17268 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17270 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17271 if ((val & 0xffff) == sram_dma_descs) {
17282 #define TEST_BUFFER_SIZE 0x2000
17284 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17285 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17289 static int tg3_test_dma(struct tg3 *tp)
17291 dma_addr_t buf_dma;
17292 u32 *buf, saved_dma_rwctrl;
17295 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17296 &buf_dma, GFP_KERNEL);
17302 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17303 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17305 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17307 if (tg3_flag(tp, 57765_PLUS))
17310 if (tg3_flag(tp, PCI_EXPRESS)) {
17311 /* DMA read watermark not used on PCIE */
17312 tp->dma_rwctrl |= 0x00180000;
17313 } else if (!tg3_flag(tp, PCIX_MODE)) {
17314 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17315 tg3_asic_rev(tp) == ASIC_REV_5750)
17316 tp->dma_rwctrl |= 0x003f0000;
17318 tp->dma_rwctrl |= 0x003f000f;
17320 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17321 tg3_asic_rev(tp) == ASIC_REV_5704) {
17322 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17323 u32 read_water = 0x7;
17325 /* If the 5704 is behind the EPB bridge, we can
17326 * do the less restrictive ONE_DMA workaround for
17327 * better performance.
17329 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17330 tg3_asic_rev(tp) == ASIC_REV_5704)
17331 tp->dma_rwctrl |= 0x8000;
17332 else if (ccval == 0x6 || ccval == 0x7)
17333 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17335 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17337 /* Set bit 23 to enable PCIX hw bug fix */
17339 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17340 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17342 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17343 /* 5780 always in PCIX mode */
17344 tp->dma_rwctrl |= 0x00144000;
17345 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17346 /* 5714 always in PCIX mode */
17347 tp->dma_rwctrl |= 0x00148000;
17349 tp->dma_rwctrl |= 0x001b000f;
17352 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17353 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17355 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17356 tg3_asic_rev(tp) == ASIC_REV_5704)
17357 tp->dma_rwctrl &= 0xfffffff0;
17359 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17360 tg3_asic_rev(tp) == ASIC_REV_5701) {
17361 /* Remove this if it causes problems for some boards. */
17362 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17364 /* On 5700/5701 chips, we need to set this bit.
17365 * Otherwise the chip will issue cacheline transactions
17366 * to streamable DMA memory with not all the byte
17367 * enables turned on. This is an error on several
17368 * RISC PCI controllers, in particular sparc64.
17370 * On 5703/5704 chips, this bit has been reassigned
17371 * a different meaning. In particular, it is used
17372 * on those chips to enable a PCI-X workaround.
17374 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17377 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17380 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17381 tg3_asic_rev(tp) != ASIC_REV_5701)
17384 /* It is best to perform DMA test with maximum write burst size
17385 * to expose the 5700/5701 write DMA bug.
17387 saved_dma_rwctrl = tp->dma_rwctrl;
17388 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17389 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17394 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17397 /* Send the buffer to the chip. */
17398 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17400 dev_err(&tp->pdev->dev,
17401 "%s: Buffer write failed. err = %d\n",
17406 /* Now read it back. */
17407 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17409 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17410 "err = %d\n", __func__, ret);
17415 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17419 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17420 DMA_RWCTRL_WRITE_BNDRY_16) {
17421 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17422 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17423 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17426 dev_err(&tp->pdev->dev,
17427 "%s: Buffer corrupted on read back! "
17428 "(%d != %d)\n", __func__, p[i], i);
17434 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17440 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17441 DMA_RWCTRL_WRITE_BNDRY_16) {
17442 /* DMA test passed without adjusting DMA boundary,
17443 * now look for chipsets that are known to expose the
17444 * DMA bug without failing the test.
17446 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17447 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17448 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17450 /* Safe to use the calculated DMA boundary. */
17451 tp->dma_rwctrl = saved_dma_rwctrl;
17454 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17458 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17463 static void tg3_init_bufmgr_config(struct tg3 *tp)
17465 if (tg3_flag(tp, 57765_PLUS)) {
17466 tp->bufmgr_config.mbuf_read_dma_low_water =
17467 DEFAULT_MB_RDMA_LOW_WATER_5705;
17468 tp->bufmgr_config.mbuf_mac_rx_low_water =
17469 DEFAULT_MB_MACRX_LOW_WATER_57765;
17470 tp->bufmgr_config.mbuf_high_water =
17471 DEFAULT_MB_HIGH_WATER_57765;
17473 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17474 DEFAULT_MB_RDMA_LOW_WATER_5705;
17475 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17476 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17477 tp->bufmgr_config.mbuf_high_water_jumbo =
17478 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17479 } else if (tg3_flag(tp, 5705_PLUS)) {
17480 tp->bufmgr_config.mbuf_read_dma_low_water =
17481 DEFAULT_MB_RDMA_LOW_WATER_5705;
17482 tp->bufmgr_config.mbuf_mac_rx_low_water =
17483 DEFAULT_MB_MACRX_LOW_WATER_5705;
17484 tp->bufmgr_config.mbuf_high_water =
17485 DEFAULT_MB_HIGH_WATER_5705;
17486 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17487 tp->bufmgr_config.mbuf_mac_rx_low_water =
17488 DEFAULT_MB_MACRX_LOW_WATER_5906;
17489 tp->bufmgr_config.mbuf_high_water =
17490 DEFAULT_MB_HIGH_WATER_5906;
17493 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17494 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17495 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17496 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17497 tp->bufmgr_config.mbuf_high_water_jumbo =
17498 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17500 tp->bufmgr_config.mbuf_read_dma_low_water =
17501 DEFAULT_MB_RDMA_LOW_WATER;
17502 tp->bufmgr_config.mbuf_mac_rx_low_water =
17503 DEFAULT_MB_MACRX_LOW_WATER;
17504 tp->bufmgr_config.mbuf_high_water =
17505 DEFAULT_MB_HIGH_WATER;
17507 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17508 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17509 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17510 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17511 tp->bufmgr_config.mbuf_high_water_jumbo =
17512 DEFAULT_MB_HIGH_WATER_JUMBO;
17515 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17516 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17519 static char *tg3_phy_string(struct tg3 *tp)
17521 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17522 case TG3_PHY_ID_BCM5400: return "5400";
17523 case TG3_PHY_ID_BCM5401: return "5401";
17524 case TG3_PHY_ID_BCM5411: return "5411";
17525 case TG3_PHY_ID_BCM5701: return "5701";
17526 case TG3_PHY_ID_BCM5703: return "5703";
17527 case TG3_PHY_ID_BCM5704: return "5704";
17528 case TG3_PHY_ID_BCM5705: return "5705";
17529 case TG3_PHY_ID_BCM5750: return "5750";
17530 case TG3_PHY_ID_BCM5752: return "5752";
17531 case TG3_PHY_ID_BCM5714: return "5714";
17532 case TG3_PHY_ID_BCM5780: return "5780";
17533 case TG3_PHY_ID_BCM5755: return "5755";
17534 case TG3_PHY_ID_BCM5787: return "5787";
17535 case TG3_PHY_ID_BCM5784: return "5784";
17536 case TG3_PHY_ID_BCM5756: return "5722/5756";
17537 case TG3_PHY_ID_BCM5906: return "5906";
17538 case TG3_PHY_ID_BCM5761: return "5761";
17539 case TG3_PHY_ID_BCM5718C: return "5718C";
17540 case TG3_PHY_ID_BCM5718S: return "5718S";
17541 case TG3_PHY_ID_BCM57765: return "57765";
17542 case TG3_PHY_ID_BCM5719C: return "5719C";
17543 case TG3_PHY_ID_BCM5720C: return "5720C";
17544 case TG3_PHY_ID_BCM5762: return "5762C";
17545 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17546 case 0: return "serdes";
17547 default: return "unknown";
17551 static char *tg3_bus_string(struct tg3 *tp, char *str)
17553 if (tg3_flag(tp, PCI_EXPRESS)) {
17554 strcpy(str, "PCI Express");
17556 } else if (tg3_flag(tp, PCIX_MODE)) {
17557 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17559 strcpy(str, "PCIX:");
17561 if ((clock_ctrl == 7) ||
17562 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17563 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17564 strcat(str, "133MHz");
17565 else if (clock_ctrl == 0)
17566 strcat(str, "33MHz");
17567 else if (clock_ctrl == 2)
17568 strcat(str, "50MHz");
17569 else if (clock_ctrl == 4)
17570 strcat(str, "66MHz");
17571 else if (clock_ctrl == 6)
17572 strcat(str, "100MHz");
17574 strcpy(str, "PCI:");
17575 if (tg3_flag(tp, PCI_HIGH_SPEED))
17576 strcat(str, "66MHz");
17578 strcat(str, "33MHz");
17580 if (tg3_flag(tp, PCI_32BIT))
17581 strcat(str, ":32-bit");
17583 strcat(str, ":64-bit");
17587 static void tg3_init_coal(struct tg3 *tp)
17589 struct ethtool_coalesce *ec = &tp->coal;
17591 memset(ec, 0, sizeof(*ec));
17592 ec->cmd = ETHTOOL_GCOALESCE;
17593 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17594 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17595 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17596 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17597 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17598 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17599 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17600 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17601 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17603 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17604 HOSTCC_MODE_CLRTICK_TXBD)) {
17605 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17606 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17607 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17608 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17611 if (tg3_flag(tp, 5705_PLUS)) {
17612 ec->rx_coalesce_usecs_irq = 0;
17613 ec->tx_coalesce_usecs_irq = 0;
17614 ec->stats_block_coalesce_usecs = 0;
17618 static int tg3_init_one(struct pci_dev *pdev,
17619 const struct pci_device_id *ent)
17621 struct net_device *dev;
17624 u32 sndmbx, rcvmbx, intmbx;
17626 u64 dma_mask, persist_dma_mask;
17627 netdev_features_t features = 0;
17629 printk_once(KERN_INFO "%s\n", version);
17631 err = pci_enable_device(pdev);
17633 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17637 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17639 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17640 goto err_out_disable_pdev;
17643 pci_set_master(pdev);
17645 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17648 goto err_out_free_res;
17651 SET_NETDEV_DEV(dev, &pdev->dev);
17653 tp = netdev_priv(dev);
17656 tp->rx_mode = TG3_DEF_RX_MODE;
17657 tp->tx_mode = TG3_DEF_TX_MODE;
17659 tp->pcierr_recovery = false;
17662 tp->msg_enable = tg3_debug;
17664 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17666 if (pdev_is_ssb_gige_core(pdev)) {
17667 tg3_flag_set(tp, IS_SSB_CORE);
17668 if (ssb_gige_must_flush_posted_writes(pdev))
17669 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17670 if (ssb_gige_one_dma_at_once(pdev))
17671 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17672 if (ssb_gige_have_roboswitch(pdev)) {
17673 tg3_flag_set(tp, USE_PHYLIB);
17674 tg3_flag_set(tp, ROBOSWITCH);
17676 if (ssb_gige_is_rgmii(pdev))
17677 tg3_flag_set(tp, RGMII_MODE);
17680 /* The word/byte swap controls here control register access byte
17681 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17684 tp->misc_host_ctrl =
17685 MISC_HOST_CTRL_MASK_PCI_INT |
17686 MISC_HOST_CTRL_WORD_SWAP |
17687 MISC_HOST_CTRL_INDIR_ACCESS |
17688 MISC_HOST_CTRL_PCISTATE_RW;
17690 /* The NONFRM (non-frame) byte/word swap controls take effect
17691 * on descriptor entries, anything which isn't packet data.
17693 * The StrongARM chips on the board (one for tx, one for rx)
17694 * are running in big-endian mode.
17696 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17697 GRC_MODE_WSWAP_NONFRM_DATA);
17698 #ifdef __BIG_ENDIAN
17699 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17701 spin_lock_init(&tp->lock);
17702 spin_lock_init(&tp->indirect_lock);
17703 INIT_WORK(&tp->reset_task, tg3_reset_task);
17705 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17707 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17709 goto err_out_free_dev;
17712 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17713 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17714 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17715 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17716 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17717 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17718 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17719 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17720 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17721 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17722 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17723 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17724 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17725 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17726 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17727 tg3_flag_set(tp, ENABLE_APE);
17728 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17729 if (!tp->aperegs) {
17730 dev_err(&pdev->dev,
17731 "Cannot map APE registers, aborting\n");
17733 goto err_out_iounmap;
17737 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17738 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17740 dev->ethtool_ops = &tg3_ethtool_ops;
17741 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17742 dev->netdev_ops = &tg3_netdev_ops;
17743 dev->irq = pdev->irq;
17745 err = tg3_get_invariants(tp, ent);
17747 dev_err(&pdev->dev,
17748 "Problem fetching invariants of chip, aborting\n");
17749 goto err_out_apeunmap;
17752 /* The EPB bridge inside 5714, 5715, and 5780 and any
17753 * device behind the EPB cannot support DMA addresses > 40-bit.
17754 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17755 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17756 * do DMA address check in tg3_start_xmit().
17758 if (tg3_flag(tp, IS_5788))
17759 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17760 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17761 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17762 #ifdef CONFIG_HIGHMEM
17763 dma_mask = DMA_BIT_MASK(64);
17766 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17768 /* Configure DMA attributes. */
17769 if (dma_mask > DMA_BIT_MASK(32)) {
17770 err = pci_set_dma_mask(pdev, dma_mask);
17772 features |= NETIF_F_HIGHDMA;
17773 err = pci_set_consistent_dma_mask(pdev,
17776 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17777 "DMA for consistent allocations\n");
17778 goto err_out_apeunmap;
17782 if (err || dma_mask == DMA_BIT_MASK(32)) {
17783 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17785 dev_err(&pdev->dev,
17786 "No usable DMA configuration, aborting\n");
17787 goto err_out_apeunmap;
17791 tg3_init_bufmgr_config(tp);
17793 /* 5700 B0 chips do not support checksumming correctly due
17794 * to hardware bugs.
17796 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17797 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17799 if (tg3_flag(tp, 5755_PLUS))
17800 features |= NETIF_F_IPV6_CSUM;
17803 /* TSO is on by default on chips that support hardware TSO.
17804 * Firmware TSO on older chips gives lower performance, so it
17805 * is off by default, but can be enabled using ethtool.
17807 if ((tg3_flag(tp, HW_TSO_1) ||
17808 tg3_flag(tp, HW_TSO_2) ||
17809 tg3_flag(tp, HW_TSO_3)) &&
17810 (features & NETIF_F_IP_CSUM))
17811 features |= NETIF_F_TSO;
17812 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17813 if (features & NETIF_F_IPV6_CSUM)
17814 features |= NETIF_F_TSO6;
17815 if (tg3_flag(tp, HW_TSO_3) ||
17816 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17817 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17818 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17819 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17820 tg3_asic_rev(tp) == ASIC_REV_57780)
17821 features |= NETIF_F_TSO_ECN;
17824 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17825 NETIF_F_HW_VLAN_CTAG_RX;
17826 dev->vlan_features |= features;
17829 * Add loopback capability only for a subset of devices that support
17830 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17831 * loopback for the remaining devices.
17833 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17834 !tg3_flag(tp, CPMU_PRESENT))
17835 /* Add the loopback capability */
17836 features |= NETIF_F_LOOPBACK;
17838 dev->hw_features |= features;
17839 dev->priv_flags |= IFF_UNICAST_FLT;
17841 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17842 dev->min_mtu = TG3_MIN_MTU;
17843 dev->max_mtu = TG3_MAX_MTU(tp);
17845 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17846 !tg3_flag(tp, TSO_CAPABLE) &&
17847 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17848 tg3_flag_set(tp, MAX_RXPEND_64);
17849 tp->rx_pending = 63;
17852 err = tg3_get_device_address(tp);
17854 dev_err(&pdev->dev,
17855 "Could not obtain valid ethernet address, aborting\n");
17856 goto err_out_apeunmap;
17859 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17860 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17861 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17862 for (i = 0; i < tp->irq_max; i++) {
17863 struct tg3_napi *tnapi = &tp->napi[i];
17866 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17868 tnapi->int_mbox = intmbx;
17874 tnapi->consmbox = rcvmbx;
17875 tnapi->prodmbox = sndmbx;
17878 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17880 tnapi->coal_now = HOSTCC_MODE_NOW;
17882 if (!tg3_flag(tp, SUPPORT_MSIX))
17886 * If we support MSIX, we'll be using RSS. If we're using
17887 * RSS, the first vector only handles link interrupts and the
17888 * remaining vectors handle rx and tx interrupts. Reuse the
17889 * mailbox values for the next iteration. The values we setup
17890 * above are still useful for the single vectored mode.
17904 * Reset chip in case UNDI or EFI driver did not shutdown
17905 * DMA self test will enable WDMAC and we'll see (spurious)
17906 * pending DMA on the PCI bus at that point.
17908 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17909 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17910 tg3_full_lock(tp, 0);
17911 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17912 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17913 tg3_full_unlock(tp);
17916 err = tg3_test_dma(tp);
17918 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17919 goto err_out_apeunmap;
17924 pci_set_drvdata(pdev, dev);
17926 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17927 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17928 tg3_asic_rev(tp) == ASIC_REV_5762)
17929 tg3_flag_set(tp, PTP_CAPABLE);
17931 tg3_timer_init(tp);
17933 tg3_carrier_off(tp);
17935 err = register_netdev(dev);
17937 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17938 goto err_out_apeunmap;
17941 if (tg3_flag(tp, PTP_CAPABLE)) {
17943 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17945 if (IS_ERR(tp->ptp_clock))
17946 tp->ptp_clock = NULL;
17949 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17950 tp->board_part_number,
17951 tg3_chip_rev_id(tp),
17952 tg3_bus_string(tp, str),
17955 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17958 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17959 ethtype = "10/100Base-TX";
17960 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17961 ethtype = "1000Base-SX";
17963 ethtype = "10/100/1000Base-T";
17965 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17966 "(WireSpeed[%d], EEE[%d])\n",
17967 tg3_phy_string(tp), ethtype,
17968 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17969 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17972 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17973 (dev->features & NETIF_F_RXCSUM) != 0,
17974 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17975 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17976 tg3_flag(tp, ENABLE_ASF) != 0,
17977 tg3_flag(tp, TSO_CAPABLE) != 0);
17978 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17980 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17981 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17983 pci_save_state(pdev);
17989 iounmap(tp->aperegs);
17990 tp->aperegs = NULL;
18003 pci_release_regions(pdev);
18005 err_out_disable_pdev:
18006 if (pci_is_enabled(pdev))
18007 pci_disable_device(pdev);
18011 static void tg3_remove_one(struct pci_dev *pdev)
18013 struct net_device *dev = pci_get_drvdata(pdev);
18016 struct tg3 *tp = netdev_priv(dev);
18020 release_firmware(tp->fw);
18022 tg3_reset_task_cancel(tp);
18024 if (tg3_flag(tp, USE_PHYLIB)) {
18029 unregister_netdev(dev);
18031 iounmap(tp->aperegs);
18032 tp->aperegs = NULL;
18039 pci_release_regions(pdev);
18040 pci_disable_device(pdev);
18044 #ifdef CONFIG_PM_SLEEP
18045 static int tg3_suspend(struct device *device)
18047 struct pci_dev *pdev = to_pci_dev(device);
18048 struct net_device *dev = pci_get_drvdata(pdev);
18049 struct tg3 *tp = netdev_priv(dev);
18054 if (!netif_running(dev))
18057 tg3_reset_task_cancel(tp);
18059 tg3_netif_stop(tp);
18061 tg3_timer_stop(tp);
18063 tg3_full_lock(tp, 1);
18064 tg3_disable_ints(tp);
18065 tg3_full_unlock(tp);
18067 netif_device_detach(dev);
18069 tg3_full_lock(tp, 0);
18070 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18071 tg3_flag_clear(tp, INIT_COMPLETE);
18072 tg3_full_unlock(tp);
18074 err = tg3_power_down_prepare(tp);
18078 tg3_full_lock(tp, 0);
18080 tg3_flag_set(tp, INIT_COMPLETE);
18081 err2 = tg3_restart_hw(tp, true);
18085 tg3_timer_start(tp);
18087 netif_device_attach(dev);
18088 tg3_netif_start(tp);
18091 tg3_full_unlock(tp);
18102 static int tg3_resume(struct device *device)
18104 struct pci_dev *pdev = to_pci_dev(device);
18105 struct net_device *dev = pci_get_drvdata(pdev);
18106 struct tg3 *tp = netdev_priv(dev);
18111 if (!netif_running(dev))
18114 netif_device_attach(dev);
18116 tg3_full_lock(tp, 0);
18118 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18120 tg3_flag_set(tp, INIT_COMPLETE);
18121 err = tg3_restart_hw(tp,
18122 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18126 tg3_timer_start(tp);
18128 tg3_netif_start(tp);
18131 tg3_full_unlock(tp);
18140 #endif /* CONFIG_PM_SLEEP */
18142 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18144 static void tg3_shutdown(struct pci_dev *pdev)
18146 struct net_device *dev = pci_get_drvdata(pdev);
18147 struct tg3 *tp = netdev_priv(dev);
18150 netif_device_detach(dev);
18152 if (netif_running(dev))
18155 if (system_state == SYSTEM_POWER_OFF)
18156 tg3_power_down(tp);
18162 * tg3_io_error_detected - called when PCI error is detected
18163 * @pdev: Pointer to PCI device
18164 * @state: The current pci connection state
18166 * This function is called after a PCI bus error affecting
18167 * this device has been detected.
18169 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18170 pci_channel_state_t state)
18172 struct net_device *netdev = pci_get_drvdata(pdev);
18173 struct tg3 *tp = netdev_priv(netdev);
18174 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18176 netdev_info(netdev, "PCI I/O error detected\n");
18180 /* We probably don't have netdev yet */
18181 if (!netdev || !netif_running(netdev))
18184 /* We needn't recover from permanent error */
18185 if (state == pci_channel_io_frozen)
18186 tp->pcierr_recovery = true;
18190 tg3_netif_stop(tp);
18192 tg3_timer_stop(tp);
18194 /* Want to make sure that the reset task doesn't run */
18195 tg3_reset_task_cancel(tp);
18197 netif_device_detach(netdev);
18199 /* Clean up software state, even if MMIO is blocked */
18200 tg3_full_lock(tp, 0);
18201 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18202 tg3_full_unlock(tp);
18205 if (state == pci_channel_io_perm_failure) {
18207 tg3_napi_enable(tp);
18210 err = PCI_ERS_RESULT_DISCONNECT;
18212 pci_disable_device(pdev);
18221 * tg3_io_slot_reset - called after the pci bus has been reset.
18222 * @pdev: Pointer to PCI device
18224 * Restart the card from scratch, as if from a cold-boot.
18225 * At this point, the card has exprienced a hard reset,
18226 * followed by fixups by BIOS, and has its config space
18227 * set up identically to what it was at cold boot.
18229 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18231 struct net_device *netdev = pci_get_drvdata(pdev);
18232 struct tg3 *tp = netdev_priv(netdev);
18233 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18238 if (pci_enable_device(pdev)) {
18239 dev_err(&pdev->dev,
18240 "Cannot re-enable PCI device after reset.\n");
18244 pci_set_master(pdev);
18245 pci_restore_state(pdev);
18246 pci_save_state(pdev);
18248 if (!netdev || !netif_running(netdev)) {
18249 rc = PCI_ERS_RESULT_RECOVERED;
18253 err = tg3_power_up(tp);
18257 rc = PCI_ERS_RESULT_RECOVERED;
18260 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18261 tg3_napi_enable(tp);
18270 * tg3_io_resume - called when traffic can start flowing again.
18271 * @pdev: Pointer to PCI device
18273 * This callback is called when the error recovery driver tells
18274 * us that its OK to resume normal operation.
18276 static void tg3_io_resume(struct pci_dev *pdev)
18278 struct net_device *netdev = pci_get_drvdata(pdev);
18279 struct tg3 *tp = netdev_priv(netdev);
18284 if (!netdev || !netif_running(netdev))
18287 tg3_full_lock(tp, 0);
18288 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18289 tg3_flag_set(tp, INIT_COMPLETE);
18290 err = tg3_restart_hw(tp, true);
18292 tg3_full_unlock(tp);
18293 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18297 netif_device_attach(netdev);
18299 tg3_timer_start(tp);
18301 tg3_netif_start(tp);
18303 tg3_full_unlock(tp);
18308 tp->pcierr_recovery = false;
18312 static const struct pci_error_handlers tg3_err_handler = {
18313 .error_detected = tg3_io_error_detected,
18314 .slot_reset = tg3_io_slot_reset,
18315 .resume = tg3_io_resume
18318 static struct pci_driver tg3_driver = {
18319 .name = DRV_MODULE_NAME,
18320 .id_table = tg3_pci_tbl,
18321 .probe = tg3_init_one,
18322 .remove = tg3_remove_one,
18323 .err_handler = &tg3_err_handler,
18324 .driver.pm = &tg3_pm_ops,
18325 .shutdown = tg3_shutdown,
18328 module_pci_driver(tg3_driver);