/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <asm/idprom.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.106"
#define DRV_MODULE_RELDATE	"January 12, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
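
/* Illustrative note (not part of the original driver): because
 * TG3_TX_RING_SIZE is a power of two, NEXT_TX() is exactly the
 * '& (foo - 1)' trick described above -- e.g. NEXT_TX(5) == 6 and
 * NEXT_TX(TG3_TX_RING_SIZE - 1) wraps back to 0 without a divide.
 */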

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
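
/* Illustrative note (not part of the original driver): with the default
 * tx_pending of TG3_DEF_TX_RING_PENDING (511), the queue is woken once
 * at least 511 / 4 = 127 descriptors are free again.
 */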

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

#define TG3_RSS_MIN_NUM_MSIX_VECS	2

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
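
/* Illustrative note (not part of the original driver): tg3_debug takes
 * the standard NETIF_MSG_* bitmap, so loading the module with e.g.
 * "modprobe tg3 tg3_debug=0x7" would enable the DRV, PROBE and LINK
 * message classes.
 */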

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },

	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
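
/* Illustrative note (not part of the original driver): the two helpers
 * above implement indirect register access -- the target offset is
 * written to the TG3PCI_REG_BASE_ADDR window in PCI config space and
 * the data then moves through TG3PCI_REG_DATA, all under indirect_lock
 * so that concurrent callers cannot interleave the two config-space
 * accesses.
 */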

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
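
/* Illustrative note (not part of the original driver): tw32() is a plain
 * (possibly posted) register write, tw32_f() flushes it by reading the
 * register back, and tw32_wait_f() additionally delays for the given
 * number of microseconds -- e.g. tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40)
 * as used in tg3_switch_clocks() below.
 */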

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
		case TG3_APE_LOCK_GRC:
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
		case TG3_APE_LOCK_GRC:
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
	case TG3_PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		u32 funcnum, is_serdes;

		funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
		if (funcnum)
			tp->phy_addr = 2;
		else
			tp->phy_addr = 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case TG3_PHY_ID_BCM50610:
	case TG3_PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
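
/* Illustrative note (not part of the original driver): the resolution
 * above follows the IEEE 802.3 Annex 28B pause priority rules.  For
 * example, if both we and the link partner advertise symmetric pause
 * (1000XPAUSE), flow control is enabled in both directions
 * (FLOW_CTRL_TX | FLOW_CTRL_RX).
 */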

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	    (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* The GPIOs do something completely different on 57765. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (dev_peer)
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
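/* Added note (assumption, for clarity): NVRAM_SWARB appears to be a small
 * hardware arbiter, presumably shared with other on-chip agents such as
 * firmware. SWARB_REQ_SET1 requests it, the grant is polled for via
 * SWARB_GNT1, and SWARB_REQ_CLR1 releases it. tp->nvram_lock_cnt simply
 * makes the lock recursive from the driver's point of view.
 */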
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
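/* Worked example (illustrative only, assuming a 264-byte page size as on
 * the Atmel AT45DB0X1B parts): the pages are not a power of two, so the
 * translation is not a plain shift. For a logical offset of 1000:
 *
 *   phys = ((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208 = 1744
 *
 * and tg3_nvram_logical_addr() inverts it:
 *
 *   logical = (1744 >> 9) * 264 + (1744 & 511) = 3 * 264 + 208 = 1000
 */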
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
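/* Example (added for clarity): if the NVRAM byte stream is aa bb cc dd,
 * tg3_nvram_read() yields the integer 0xaabbccdd on both BE and LE hosts,
 * which means the in-memory byte order differs between them. The
 * cpu_to_be32() above pins the bytes back to aa bb cc dd in memory, which
 * is what bytestream consumers (MAC addresses, firmware images) expect.
 */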
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
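/* Worked example (illustrative): for dev_addr 00:10:18:aa:bb:cc the
 * packing above produces addr_high = 0x0010 and addr_low = 0x18aabbcc,
 * i.e. the MAC address split across the low 16 bits of the HIGH register
 * and all 32 bits of the LOW register.
 */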
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	switch (state) {
	case PCI_D0:
		pci_enable_wake(tp->pdev, state, false);
		pci_set_power_state(tp->pdev, PCI_D0);

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		break;

	default:
		printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
			tp->dev->name, state);
		return -EINVAL;
	}
	/* Restore the CLKREQ setting. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = pci_pme_capable(tp->pdev, state) &&
			     device_may_wakeup(&tp->pdev->dev) &&
			     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		do_low_power = false;
		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
		    !tp->link_config.phy_is_low_power) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->link_config.phy_is_low_power = 1;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
			    device_should_wake) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != TG3_PHY_ID_BCMAC131) {
				phyid &= TG3_PHY_OUI_MASK;
				if (phyid == TG3_PHY_OUI_1 ||
				    phyid == TG3_PHY_OUI_2 ||
				    phyid == TG3_PHY_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (tp->link_config.phy_is_low_power == 0) {
			tp->link_config.phy_is_low_power = 1;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);
	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			if (do_low_power) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
			mac_mode |= tp->mac_mode &
				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
			if (mac_mode & MAC_MODE_APE_TX_EN)
				mac_mode |= MAC_MODE_TDE_ENABLE;
		}

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}
	if (!(device_should_wake) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	if (device_should_wake)
		pci_enable_wake(tp->pdev, state, true);

	/* Finally, set the new power state. */
	pci_set_power_state(tp->pdev, state);

	return 0;
}
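/* Summary of the shutdown ordering above (added commentary): mask PCI
 * interrupts, save the live link config, re-advertise only the speeds WoL
 * can use, program MAC_MODE for magic-packet reception, gate the unused
 * clocks, power down the PHY if nobody needs it, hand the aux power GPIOs
 * over via tg3_frob_aux_power(), signal the firmware that we are shutting
 * down, arm PME, and only then drop to the target D-state.
 */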
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);
2815 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2816 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2817 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2818 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2820 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2821 } else if (tp->link_config.speed == SPEED_INVALID) {
2822 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2823 tp->link_config.advertising &=
2824 ~(ADVERTISED_1000baseT_Half |
2825 ADVERTISED_1000baseT_Full);
2827 new_adv = ADVERTISE_CSMA;
2828 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2829 new_adv |= ADVERTISE_10HALF;
2830 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2831 new_adv |= ADVERTISE_10FULL;
2832 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2833 new_adv |= ADVERTISE_100HALF;
2834 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2835 new_adv |= ADVERTISE_100FULL;
2837 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2839 tg3_writephy(tp, MII_ADVERTISE, new_adv);
		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}
	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

	udelay(40);

	return err;
}
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}
	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}
	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}
	/* Prevent send BD corruption. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
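/* Added note: the happy path through the state machine below is
 * AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 * ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT -> COMPLETE_ACK_INIT ->
 * COMPLETE_ACK -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK, which mirrors
 * the IEEE 802.3z clause 37 autonegotiation arbitration flow.
 */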
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
3395 ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3403 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3405 if (rx_cfg_reg != ap->ability_match_cfg) {
3406 ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
3435 case ANEG_STATE_UNKNOWN:
3436 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
3441 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
3458 ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
3471 delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;
3483 case ANEG_STATE_ABILITY_DETECT_INIT:
3484 ap->flags &= ~(MR_TOGGLE_TX);
3485 ap->txconfig = ANEG_CFG_FD;
3486 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3487 if (flowctrl & ADVERTISE_1000XPAUSE)
3488 ap->txconfig |= ANEG_CFG_PS1;
3489 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3490 ap->txconfig |= ANEG_CFG_PS2;
3491 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3492 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
3500 if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
3506 ap->txconfig |= ANEG_CFG_ACK;
3507 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3508 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
3534 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3535 MR_LP_ADV_HALF_DUPLEX |
3536 MR_LP_ADV_SYM_PAUSE |
3537 MR_LP_ADV_ASYM_PAUSE |
3538 MR_LP_ADV_REMOTE_FAULT1 |
3539 MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
3544 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3545 if (ap->rxconfig & ANEG_CFG_HD)
3546 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3547 if (ap->rxconfig & ANEG_CFG_PS1)
3548 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3549 if (ap->rxconfig & ANEG_CFG_PS2)
3550 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3551 if (ap->rxconfig & ANEG_CFG_RF1)
3552 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3553 if (ap->rxconfig & ANEG_CFG_RF2)
3554 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3555 if (ap->rxconfig & ANEG_CFG_NP)
3556 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3558 ap->link_time = ap->cur_time;
3560 ap->flags ^= (MR_TOGGLE_TX);
3561 if (ap->rxconfig & 0x0008)
3562 ap->flags |= MR_TOGGLE_RX;
3563 if (ap->rxconfig & ANEG_CFG_NP)
3564 ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
3572 if (ap->ability_match != 0 &&
3573 ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
3578 if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
3593 ap->link_time = ap->cur_time;
3594 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
3603 if (ap->ability_match != 0 &&
3604 ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
3609 if (delta > ANEG_STATE_SETTLE_TIME) {
3610 /* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;
3644 tw32_f(MAC_TX_AUTO_NEG, 0);
3646 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);
3653 memset(&aninfo, 0, sizeof(aninfo));
3654 aninfo.flags |= MR_AN_ENABLE;
3655 aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}
3666 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;
3686 /* Reset when initting first time or we have a link. */
3687 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3697 /* Wait for reset to complete. */
3698 /* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);
3702 /* Config mode; select PMA/Ch 1 regs. */
3703 tg3_writephy(tp, 0x10, 0x8411);
3705 /* Enable auto-lock and comdet, select txclk for tx. */
3706 tg3_writephy(tp, 0x11, 0x0a10);
3708 tg3_writephy(tp, 0x18, 0x00a0);
3709 tg3_writephy(tp, 0x16, 0x41ff);
3711 /* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);
3720 /* Wait for signal to stabilize */
3721 /* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);
	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;
3745 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);
3756 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3758 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}
3779 /* Want auto-negotiation. */
3780 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3782 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3783 if (flowctrl & ADVERTISE_1000XPAUSE)
3784 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3785 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3786 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3788 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3789 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3790 tp->serdes_counter &&
3791 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3792 MAC_STATUS_RCVD_CFG)) ==
3793 MAC_STATUS_PCS_SYNCED)) {
3794 tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3805 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3806 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3807 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3808 MAC_STATUS_SIGNAL_DET)) {
3809 sg_dig_status = tr32(SG_DIG_STATUS);
3810 mac_status = tr32(MAC_STATUS);
3812 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3813 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3814 u32 local_adv = 0, remote_adv = 0;
3816 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3817 local_adv |= ADVERTISE_1000XPAUSE;
3818 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3819 local_adv |= ADVERTISE_1000XPSE_ASYM;
3821 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3822 remote_adv |= LPA_1000XPAUSE;
3823 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3824 remote_adv |= LPA_1000XPAUSE_ASYM;
3826 tg3_setup_flow_control(tp, local_adv, remote_adv);
3827 current_link_up = 1;
3828 tp->serdes_counter = 0;
3829 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3830 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);
3848 /* Link parallel detection - link is up */
3849 /* only if we have PCS_SYNC and not */
3850 /* receiving config code words */
3851 mac_status = tr32(MAC_STATUS);
3852 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3853 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3854 tg3_setup_flow_control(tp, 0, 0);
3855 current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;
3884 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3885 u32 local_adv = 0, remote_adv = 0;
3887 if (txflags & ANEG_CFG_PS1)
3888 local_adv |= ADVERTISE_1000XPAUSE;
3889 if (txflags & ANEG_CFG_PS2)
3890 local_adv |= ADVERTISE_1000XPSE_ASYM;
3892 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3893 remote_adv |= LPA_1000XPAUSE;
3894 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3895 remote_adv |= LPA_1000XPAUSE_ASYM;
3897 tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
3907 if ((tr32(MAC_STATUS) &
3908 (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
3914 if (current_link_up == 0 &&
3915 (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
3945 orig_active_speed = tp->link_config.active_speed;
3946 orig_active_duplex = tp->link_config.active_duplex;
3948 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3949 netif_carrier_ok(tp->dev) &&
3950 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3951 mac_status = tr32(MAC_STATUS);
3952 mac_status &= (MAC_STATUS_PCS_SYNCED |
3953 MAC_STATUS_SIGNAL_DET |
3954 MAC_STATUS_CFG_CHANGED |
3955 MAC_STATUS_RCVD_CFG);
3956 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3957 MAC_STATUS_SIGNAL_DET)) {
3958 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);
3966 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3967 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
3972 tg3_init_bcm8002(tp);
3974 /* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
3979 mac_status = tr32(MAC_STATUS);
3981 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3982 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3984 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3986 tp->napi[0].hw_status->status =
3987 (SD_STATUS_UPDATED |
3988 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3990 for (i = 0; i < 100; i++) {
3991 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}
4000 mac_status = tr32(MAC_STATUS);
4001 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4002 current_link_up = 0;
4003 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4004 tp->serdes_counter == 0) {
4005 tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
4013 tp->link_config.active_speed = SPEED_1000;
4014 tp->link_config.active_duplex = DUPLEX_FULL;
4015 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4016 LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
4020 tp->link_config.active_duplex = DUPLEX_INVALID;
4021 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4022 LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
4034 if (orig_pause_cfg != now_pause_cfg ||
4035 orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
4059 MAC_STATUS_CFG_CHANGED |
4060 MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
4068 current_speed = SPEED_INVALID;
4069 current_duplex = DUPLEX_INVALID;
4071 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4072 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4074 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4075 bmsr |= BMSR_LSTATUS;
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4082 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4083 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4084 /* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4089 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4090 ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4096 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4097 new_adv |= ADVERTISE_1000XHALF;
4098 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4099 new_adv |= ADVERTISE_1000XFULL;
4101 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4102 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4103 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4104 tg3_writephy(tp, MII_BMCR, bmcr);
4106 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4107 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4118 if (tp->link_config.duplex == DUPLEX_FULL)
4119 new_bmcr |= BMCR_FULLDPLX;
4121 if (new_bmcr != bmcr) {
4122 /* BMCR_SPEED1000 is a reserved bit that needs
4123 * to be set on write.
4125 new_bmcr |= BMCR_SPEED1000;
4127 /* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}
4157 if (bmsr & BMSR_LSTATUS) {
4158 current_speed = SPEED_1000;
4159 current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;
4171 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4172 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4173 common = local_adv & remote_adv;
4174 if (common & (ADVERTISE_1000XHALF |
4175 ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4187 tg3_setup_flow_control(tp, local_adv, remote_adv);
4189 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4190 if (tp->link_config.active_duplex == DUPLEX_HALF)
4191 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4198 tp->link_config.active_speed = current_speed;
4199 tp->link_config.active_duplex = current_duplex;
4201 if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}

	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 val, scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	smp_mb();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
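/* Worked example (illustrative): with TG3_TX_RING_SIZE 512 and the default
 * tx_pending of 511, tx_prod = 3 and tx_cons = 508 gives
 * (3 - 508) & 511 = 7 descriptors in flight, so 511 - 7 = 504 are free.
 * The mask handles the unsigned wraparound, which is why the ring size
 * must be a power of two.
 */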
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;
	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->skb)
		return;

	pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(ri->skb);
	ri->skb = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
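/* Added note: on success the mapped length is returned rather than 0 so
 * the caller in tg3_rx() can pass the same size back to pci_unmap_single()
 * when it later decides to keep this buffer for the received packet.
 */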
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
4519 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4520 struct tg3_rx_prodring_set *dpr,
4521 u32 opaque_key, int src_idx,
4522 u32 dest_idx_unmasked)
4524 struct tg3 *tp = tnapi->tp;
4525 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4526 struct ring_info *src_map, *dest_map;
4528 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4530 switch (opaque_key) {
4531 case RXD_OPAQUE_RING_STD:
4532 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4533 dest_desc = &dpr->rx_std[dest_idx];
4534 dest_map = &dpr->rx_std_buffers[dest_idx];
4535 src_desc = &spr->rx_std[src_idx];
4536 src_map = &spr->rx_std_buffers[src_idx];
4539 case RXD_OPAQUE_RING_JUMBO:
4540 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4541 dest_desc = &dpr->rx_jmb[dest_idx].std;
4542 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4543 src_desc = &spr->rx_jmb[src_idx].std;
4544 src_map = &spr->rx_jmb_buffers[src_idx];
4551 dest_map->skb = src_map->skb;
4552 pci_unmap_addr_set(dest_map, mapping,
4553 pci_unmap_addr(src_map, mapping));
4554 dest_desc->addr_hi = src_desc->addr_hi;
4555 dest_desc->addr_lo = src_desc->addr_lo;
4557 /* Ensure that the update to the skb happens after the physical
4558 * addresses have been transferred to the new BD location.
 */
smp_wmb();
4562 src_map->skb = NULL;
4565 /* The RX ring scheme is composed of multiple rings which post fresh
4566 * buffers to the chip, and one special ring the chip uses to report
4567 * status back to the host.
4569 * The special ring reports the status of received packets to the
4570 * host. The chip does not write into the original descriptor the
4571 * RX buffer was obtained from. The chip simply takes the original
4572 * descriptor as provided by the host, updates the status and length
4573 * field, then writes this into the next status ring entry.
4575 * Each ring the host uses to post buffers to the chip is described
4576 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4577 * it is first placed into the on-chip RAM. When the packet's length
4578 * is known, the chip walks down the TG3_BDINFO entries to select the
4579 * ring. Each TG3_BDINFO specifies a MAXLEN field, and the first
4580 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
4582 * The "separate ring for rx status" scheme may sound odd, but it makes
4583 * sense from a cache coherency perspective. If only the host writes
4584 * to the buffer post rings, and only the chip writes to the rx status
4585 * rings, then cache lines never move beyond shared-modified state.
4586 * If both the host and chip were to write into the same ring, cache line
4587 * eviction could occur since both entities want it in an exclusive state.
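
/* An illustrative sketch (not driver code) of the MAXLEN-based ring
 * selection described above. The bdinfo_maxlen[] array and the helper
 * itself are hypothetical stand-ins for the chip's internal
 * TG3_BDINFO walk.
 */
static int example_select_rx_ring(const u32 *bdinfo_maxlen, int nrings,
				  u32 pkt_len)
{
	int i;

	/* The first TG3_BDINFO whose MAXLEN covers the packet wins. */
	for (i = 0; i < nrings; i++)
		if (pkt_len <= bdinfo_maxlen[i])
			return i;

	return -1;	/* no posting ring can hold this packet */
}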
4589 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4591 struct tg3 *tp = tnapi->tp;
4592 u32 work_mask, rx_std_posted = 0;
4593 u32 std_prod_idx, jmb_prod_idx;
4594 u32 sw_idx = tnapi->rx_rcb_ptr;
4597 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4599 hw_idx = *(tnapi->rx_rcb_prod_idx);
4601 * We need to order the read of hw_idx and the read of
4602 * the opaque cookie.
 */
rmb();
4607 std_prod_idx = tpr->rx_std_prod_idx;
4608 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4609 while (sw_idx != hw_idx && budget > 0) {
4610 struct ring_info *ri;
4611 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4613 struct sk_buff *skb;
4614 dma_addr_t dma_addr;
4615 u32 opaque_key, desc_idx, *post_ptr;
4617 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4618 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4619 if (opaque_key == RXD_OPAQUE_RING_STD) {
4620 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4621 dma_addr = pci_unmap_addr(ri, mapping);
4623 post_ptr = &std_prod_idx;
4625 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4626 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4627 dma_addr = pci_unmap_addr(ri, mapping);
4629 post_ptr = &jmb_prod_idx;
4631 goto next_pkt_nopost;
4633 work_mask |= opaque_key;
4635 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4636 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4638 tg3_recycle_rx(tnapi, tpr, opaque_key,
4639 desc_idx, *post_ptr);
4641 /* Other statistics are tracked by the card itself. */
4642 tp->net_stats.rx_dropped++;
4646 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4649 if (len > RX_COPY_THRESHOLD &&
4650 tp->rx_offset == NET_IP_ALIGN) {
4651 /* rx_offset will likely not equal NET_IP_ALIGN
4652 * if this is a 5701 card running in PCI-X mode
4653 * [see tg3_get_invariants()]
4657 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4664 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4665 PCI_DMA_FROMDEVICE);
4669 struct sk_buff *copy_skb;
4671 tg3_recycle_rx(tnapi, tpr, opaque_key,
4672 desc_idx, *post_ptr);
4674 copy_skb = netdev_alloc_skb(tp->dev,
4675 len + TG3_RAW_IP_ALIGN);
4676 if (copy_skb == NULL)
4677 goto drop_it_no_recycle;
4679 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4680 skb_put(copy_skb, len);
4681 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4682 skb_copy_from_linear_data(skb, copy_skb->data, len);
4683 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4685 /* We'll reuse the original ring buffer. */
4689 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4690 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4691 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4692 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4693 skb->ip_summed = CHECKSUM_UNNECESSARY;
4695 skb->ip_summed = CHECKSUM_NONE;
4697 skb->protocol = eth_type_trans(skb, tp->dev);
4699 if (len > (tp->dev->mtu + ETH_HLEN) &&
4700 skb->protocol != htons(ETH_P_8021Q)) {
4705 #if TG3_VLAN_TAG_USED
4706 if (tp->vlgrp != NULL &&
4707 desc->type_flags & RXD_FLAG_VLAN) {
4708 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4709 desc->err_vlan & RXD_VLAN_MASK, skb);
4712 napi_gro_receive(&tnapi->napi, skb);
4720 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4721 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4722 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4723 tpr->rx_std_prod_idx);
4724 work_mask &= ~RXD_OPAQUE_RING_STD;
4729 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4731 /* Refresh hw_idx to see if there is new work */
4732 if (sw_idx == hw_idx) {
4733 hw_idx = *(tnapi->rx_rcb_prod_idx);
4738 /* ACK the status ring. */
4739 tnapi->rx_rcb_ptr = sw_idx;
4740 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4742 /* Refill RX ring(s). */
4743 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4744 if (work_mask & RXD_OPAQUE_RING_STD) {
4745 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4746 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4747 tpr->rx_std_prod_idx);
4749 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4750 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4751 TG3_RX_JUMBO_RING_SIZE;
4752 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4753 tpr->rx_jmb_prod_idx);
4756 } else if (work_mask) {
4757 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4758 * updated before the producer indices can be updated.
4762 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4763 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4765 if (tnapi != &tp->napi[1])
4766 napi_schedule(&tp->napi[1].napi);
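
/* A condensed sketch of the copy-vs-recycle policy tg3_rx() applies
 * above (illustrative only): packets at or below the copybreak cutoff
 * are copied into a small fresh skb so the original DMA buffer can be
 * recycled, while larger packets are passed up whole and a replacement
 * buffer is allocated.
 */
static bool example_should_copy_rx_pkt(const struct tg3 *tp, u32 len)
{
	/* The no-copy path also requires the standard buffer alignment;
	 * see the rx_offset check in tg3_rx().
	 */
	return len <= RX_COPY_THRESHOLD || tp->rx_offset != NET_IP_ALIGN;
}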
4772 static void tg3_poll_link(struct tg3 *tp)
4774 /* handle link change and other phy events */
4775 if (!(tp->tg3_flags &
4776 (TG3_FLAG_USE_LINKCHG_REG |
4777 TG3_FLAG_POLL_SERDES))) {
4778 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4780 if (sblk->status & SD_STATUS_LINK_CHG) {
4781 sblk->status = SD_STATUS_UPDATED |
4782 (sblk->status & ~SD_STATUS_LINK_CHG);
4783 spin_lock(&tp->lock);
4784 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4786 (MAC_STATUS_SYNC_CHANGED |
4787 MAC_STATUS_CFG_CHANGED |
4788 MAC_STATUS_MI_COMPLETION |
4789 MAC_STATUS_LNKSTATE_CHANGED));
4792 tg3_setup_phy(tp, 0);
4793 spin_unlock(&tp->lock);
4798 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4799 struct tg3_rx_prodring_set *dpr,
4800 struct tg3_rx_prodring_set *spr)
4802 u32 si, di, cpycnt, src_prod_idx;
4806 src_prod_idx = spr->rx_std_prod_idx;
4808 /* Make sure updates to the rx_std_buffers[] entries and the
4809 * standard producer index are seen in the correct order.
4813 if (spr->rx_std_cons_idx == src_prod_idx)
4816 if (spr->rx_std_cons_idx < src_prod_idx)
4817 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4819 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4821 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4823 si = spr->rx_std_cons_idx;
4824 di = dpr->rx_std_prod_idx;
4826 for (i = di; i < di + cpycnt; i++) {
4827 if (dpr->rx_std_buffers[i].skb) {
4837 /* Ensure that updates to the rx_std_buffers ring and the
4838 * shadowed hardware producer ring from tg3_recycle_skb() are
4839 * ordered correctly WRT the skb check above.
4843 memcpy(&dpr->rx_std_buffers[di],
4844 &spr->rx_std_buffers[si],
4845 cpycnt * sizeof(struct ring_info));
4847 for (i = 0; i < cpycnt; i++, di++, si++) {
4848 struct tg3_rx_buffer_desc *sbd, *dbd;
4849 sbd = &spr->rx_std[si];
4850 dbd = &dpr->rx_std[di];
4851 dbd->addr_hi = sbd->addr_hi;
4852 dbd->addr_lo = sbd->addr_lo;
4855 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4857 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4862 src_prod_idx = spr->rx_jmb_prod_idx;
4864 /* Make sure updates to the rx_jmb_buffers[] entries and
4865 * the jumbo producer index are seen in the correct order.
4869 if (spr->rx_jmb_cons_idx == src_prod_idx)
4872 if (spr->rx_jmb_cons_idx < src_prod_idx)
4873 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4875 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4877 cpycnt = min(cpycnt,
4878 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4880 si = spr->rx_jmb_cons_idx;
4881 di = dpr->rx_jmb_prod_idx;
4883 for (i = di; i < di + cpycnt; i++) {
4884 if (dpr->rx_jmb_buffers[i].skb) {
4894 /* Ensure that updates to the rx_jmb_buffers ring and the
4895 * shadowed hardware producer ring from tg3_recycle_skb() are
4896 * ordered correctly WRT the skb check above.
4900 memcpy(&dpr->rx_jmb_buffers[di],
4901 &spr->rx_jmb_buffers[si],
4902 cpycnt * sizeof(struct ring_info));
4904 for (i = 0; i < cpycnt; i++, di++, si++) {
4905 struct tg3_rx_buffer_desc *sbd, *dbd;
4906 sbd = &spr->rx_jmb[si].std;
4907 dbd = &dpr->rx_jmb[di].std;
4908 dbd->addr_hi = sbd->addr_hi;
4909 dbd->addr_lo = sbd->addr_lo;
4912 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4913 TG3_RX_JUMBO_RING_SIZE;
4914 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4915 TG3_RX_JUMBO_RING_SIZE;
4921 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4923 struct tg3 *tp = tnapi->tp;
4925 /* run TX completion thread */
4926 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4928 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4932 /* run RX thread, within the bounds set by NAPI.
4933 * All RX "locking" is done by ensuring outside
4934 * code synchronizes with tg3->napi.poll()
4936 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4937 work_done += tg3_rx(tnapi, budget - work_done);
4939 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4940 struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
4942 u32 std_prod_idx = dpr->rx_std_prod_idx;
4943 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4945 for (i = 1; i < tp->irq_cnt; i++)
4946 err |= tg3_rx_prodring_xfer(tp, dpr,
4947 tp->napi[i].prodring);
4951 if (std_prod_idx != dpr->rx_std_prod_idx)
4952 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4953 dpr->rx_std_prod_idx);
4955 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
4956 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4957 dpr->rx_jmb_prod_idx);
4962 tw32_f(HOSTCC_MODE, tp->coal_now);
4968 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4970 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4971 struct tg3 *tp = tnapi->tp;
4973 struct tg3_hw_status *sblk = tnapi->hw_status;
4976 work_done = tg3_poll_work(tnapi, work_done, budget);
4978 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4981 if (unlikely(work_done >= budget))
4984 /* tp->last_tag is used in tg3_restart_ints() below
4985 * to tell the hw how much work has been processed,
4986 * so we must read it before checking for more work.
4988 tnapi->last_tag = sblk->status_tag;
4989 tnapi->last_irq_tag = tnapi->last_tag;
4992 /* check for RX/TX work to do */
4993 if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4994 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
4995 napi_complete(napi);
4996 /* Reenable interrupts. */
4997 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5006 /* work_done is guaranteed to be less than budget. */
5007 napi_complete(napi);
5008 schedule_work(&tp->reset_task);
5012 static int tg3_poll(struct napi_struct *napi, int budget)
5014 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5015 struct tg3 *tp = tnapi->tp;
5017 struct tg3_hw_status *sblk = tnapi->hw_status;
5022 work_done = tg3_poll_work(tnapi, work_done, budget);
5024 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5027 if (unlikely(work_done >= budget))
5030 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5031 /* tp->last_tag is used in tg3_int_reenable() below
5032 * to tell the hw how much work has been processed,
5033 * so we must read it before checking for more work.
5035 tnapi->last_tag = sblk->status_tag;
5036 tnapi->last_irq_tag = tnapi->last_tag;
5039 sblk->status &= ~SD_STATUS_UPDATED;
5041 if (likely(!tg3_has_work(tnapi))) {
5042 napi_complete(napi);
5043 tg3_int_reenable(tnapi);
5051 /* work_done is guaranteed to be less than budget. */
5052 napi_complete(napi);
5053 schedule_work(&tp->reset_task);
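
/* The skeleton that both poll routines above follow is the standard
 * NAPI contract (sketch only): consume at most 'budget' packets, and
 * call napi_complete() and re-enable interrupts only when no work is
 * left and the budget was not exhausted.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process RX/TX completions, bounded by budget ... */

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable the device's interrupt source here */
	}

	return work_done;
}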
5057 static void tg3_irq_quiesce(struct tg3 *tp)
5061 BUG_ON(tp->irq_sync);
5066 for (i = 0; i < tp->irq_cnt; i++)
5067 synchronize_irq(tp->napi[i].irq_vec);
5070 static inline int tg3_irq_sync(struct tg3 *tp)
5072 return tp->irq_sync;
5075 /* Fully shut down all tg3 driver activity elsewhere in the system.
5076 * If irq_sync is non-zero, then the IRQ handlers must be
5077 * synchronized as well. Most of the time this is only necessary
5078 * when shutting down the device.
5080 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5082 spin_lock_bh(&tp->lock);
5084 tg3_irq_quiesce(tp);
5087 static inline void tg3_full_unlock(struct tg3 *tp)
5089 spin_unlock_bh(&tp->lock);
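
/* Typical usage of the helpers above (sketch): configuration paths
 * take the full lock, and pass a non-zero irq_sync only when the IRQ
 * handlers must also be quiesced, e.g. around a device shutdown.
 */
static void example_reconfig(struct tg3 *tp)
{
	tg3_full_lock(tp, 0);	/* no IRQ quiesce for a light change */
	/* ... modify driver state protected by tp->lock ... */
	tg3_full_unlock(tp);
}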
5092 /* One-shot MSI handler - Chip automatically disables interrupt
5093 * after sending MSI so driver doesn't have to do it.
5095 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5097 struct tg3_napi *tnapi = dev_id;
5098 struct tg3 *tp = tnapi->tp;
5100 prefetch(tnapi->hw_status);
5102 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5104 if (likely(!tg3_irq_sync(tp)))
5105 napi_schedule(&tnapi->napi);
5110 /* MSI ISR - No need to check for interrupt sharing and no need to
5111 * flush status block and interrupt mailbox. PCI ordering rules
5112 * guarantee that MSI will arrive after the status block.
5114 static irqreturn_t tg3_msi(int irq, void *dev_id)
5116 struct tg3_napi *tnapi = dev_id;
5117 struct tg3 *tp = tnapi->tp;
5119 prefetch(tnapi->hw_status);
5121 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5123 * Writing any value to intr-mbox-0 clears PCI INTA# and
5124 * chip-internal interrupt pending events.
5125 * Writing non-zero to intr-mbox-0 additionally tells the
5126 * NIC to stop sending us irqs, engaging the "in-intr-handler"
 * disposition.
 */
5129 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5130 if (likely(!tg3_irq_sync(tp)))
5131 napi_schedule(&tnapi->napi);
5133 return IRQ_RETVAL(1);
5136 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5138 struct tg3_napi *tnapi = dev_id;
5139 struct tg3 *tp = tnapi->tp;
5140 struct tg3_hw_status *sblk = tnapi->hw_status;
5141 unsigned int handled = 1;
5143 /* In INTx mode, it is possible for the interrupt to arrive at
5144 * the CPU before the status block posted ahead of the interrupt is visible.
5145 * Reading the PCI State register will confirm whether the
5146 * interrupt is ours and will flush the status block.
5148 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5149 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5150 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5157 * Writing any value to intr-mbox-0 clears PCI INTA# and
5158 * chip-internal interrupt pending events.
5159 * Writing non-zero to intr-mbox-0 additionally tells the
5160 * NIC to stop sending us irqs, engaging the "in-intr-handler"
 * disposition.
 *
5163 * Flush the mailbox to de-assert the IRQ immediately to prevent
5164 * spurious interrupts. The flush impacts performance but
5165 * excessive spurious interrupts can be worse in some cases.
5167 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5168 if (tg3_irq_sync(tp))
5170 sblk->status &= ~SD_STATUS_UPDATED;
5171 if (likely(tg3_has_work(tnapi))) {
5172 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5173 napi_schedule(&tnapi->napi);
5175 /* No work; a shared interrupt, perhaps? Re-enable
5176 * interrupts, and flush that PCI write
5178 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5182 return IRQ_RETVAL(handled);
5185 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5187 struct tg3_napi *tnapi = dev_id;
5188 struct tg3 *tp = tnapi->tp;
5189 struct tg3_hw_status *sblk = tnapi->hw_status;
5190 unsigned int handled = 1;
5192 /* In INTx mode, it is possible for the interrupt to arrive at
5193 * the CPU before the status block posted ahead of the interrupt is visible.
5194 * Reading the PCI State register will confirm whether the
5195 * interrupt is ours and will flush the status block.
5197 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5198 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5199 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5206 * Writing any value to intr-mbox-0 clears PCI INTA# and
5207 * chip-internal interrupt pending events.
5208 * Writing non-zero to intr-mbox-0 additionally tells the
5209 * NIC to stop sending us irqs, engaging the "in-intr-handler"
 * disposition.
 *
5212 * Flush the mailbox to de-assert the IRQ immediately to prevent
5213 * spurious interrupts. The flush impacts performance but
5214 * excessive spurious interrupts can be worse in some cases.
5216 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5219 * In a shared interrupt configuration, sometimes other devices'
5220 * interrupts will scream. We record the current status tag here
5221 * so that the above check can report that the screaming interrupts
5222 * are unhandled. Eventually they will be silenced.
5224 tnapi->last_irq_tag = sblk->status_tag;
5226 if (tg3_irq_sync(tp))
5229 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5231 napi_schedule(&tnapi->napi);
5234 return IRQ_RETVAL(handled);
5237 /* ISR for interrupt test */
5238 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5240 struct tg3_napi *tnapi = dev_id;
5241 struct tg3 *tp = tnapi->tp;
5242 struct tg3_hw_status *sblk = tnapi->hw_status;
5244 if ((sblk->status & SD_STATUS_UPDATED) ||
5245 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5246 tg3_disable_ints(tp);
5247 return IRQ_RETVAL(1);
5249 return IRQ_RETVAL(0);
5252 static int tg3_init_hw(struct tg3 *, int);
5253 static int tg3_halt(struct tg3 *, int, int);
5255 /* Restart hardware after configuration changes, self-test, etc.
5256 * Invoked with tp->lock held.
5258 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5259 __releases(tp->lock)
5260 __acquires(tp->lock)
5264 err = tg3_init_hw(tp, reset_phy);
5266 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
5267 "aborting.\n", tp->dev->name);
5268 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5269 tg3_full_unlock(tp);
5270 del_timer_sync(&tp->timer);
5272 tg3_napi_enable(tp);
5274 tg3_full_lock(tp, 0);
5279 #ifdef CONFIG_NET_POLL_CONTROLLER
5280 static void tg3_poll_controller(struct net_device *dev)
5283 struct tg3 *tp = netdev_priv(dev);
5285 for (i = 0; i < tp->irq_cnt; i++)
5286 tg3_interrupt(tp->napi[i].irq_vec, dev);
5290 static void tg3_reset_task(struct work_struct *work)
5292 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5294 unsigned int restart_timer;
5296 tg3_full_lock(tp, 0);
5298 if (!netif_running(tp->dev)) {
5299 tg3_full_unlock(tp);
5303 tg3_full_unlock(tp);
5309 tg3_full_lock(tp, 1);
5311 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5312 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5314 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5315 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5316 tp->write32_rx_mbox = tg3_write_flush_reg32;
5317 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5318 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5321 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5322 err = tg3_init_hw(tp, 1);
5326 tg3_netif_start(tp);
5329 mod_timer(&tp->timer, jiffies + 1);
5332 tg3_full_unlock(tp);
5338 static void tg3_dump_short_state(struct tg3 *tp)
5340 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5341 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5342 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5343 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5346 static void tg3_tx_timeout(struct net_device *dev)
5348 struct tg3 *tp = netdev_priv(dev);
5350 if (netif_msg_tx_err(tp)) {
5351 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5353 tg3_dump_short_state(tp);
5356 schedule_work(&tp->reset_task);
5359 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5360 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5362 u32 base = (u32) mapping & 0xffffffff;
5364 return ((base > 0xffffdcc0) &&
5365 (base + len + 8 < base));
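
/* A worked example of the test above: with base = 0xffffff00 and
 * len = 0x200, base + len + 8 wraps the low 32 bits to 0x108, which is
 * less than base, so the buffer straddles a 4GB boundary. The
 * 0xffffdcc0 pre-filter appears to be 2^32 minus roughly the largest
 * possible mapping (a 9000-byte jumbo frame plus overhead), a cheap
 * way to skip the arithmetic for buffers that cannot possibly cross.
 */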
5368 /* Test for DMA addresses > 40-bit */
5369 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5372 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5373 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5374 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5381 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5383 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5384 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5385 struct sk_buff *skb, u32 last_plus_one,
5386 u32 *start, u32 base_flags, u32 mss)
5388 struct tg3 *tp = tnapi->tp;
5389 struct sk_buff *new_skb;
5390 dma_addr_t new_addr = 0;
5394 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5395 new_skb = skb_copy(skb, GFP_ATOMIC);
5397 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5399 new_skb = skb_copy_expand(skb,
5400 skb_headroom(skb) + more_headroom,
5401 skb_tailroom(skb), GFP_ATOMIC);
5407 /* New SKB is guaranteed to be linear. */
5409 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5411 /* Make sure the mapping succeeded */
5412 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5414 dev_kfree_skb(new_skb);
5417 /* Make sure new skb does not cross any 4G boundaries.
5418 * Drop the packet if it does.
5420 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5421 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5422 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5425 dev_kfree_skb(new_skb);
5428 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5429 base_flags, 1 | (mss << 1));
5430 *start = NEXT_TX(entry);
5434 /* Now clean up the sw ring entries. */
5436 while (entry != last_plus_one) {
5440 len = skb_headlen(skb);
5442 len = skb_shinfo(skb)->frags[i-1].size;
5444 pci_unmap_single(tp->pdev,
5445 pci_unmap_addr(&tnapi->tx_buffers[entry],
5447 len, PCI_DMA_TODEVICE);
5449 tnapi->tx_buffers[entry].skb = new_skb;
5450 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5453 tnapi->tx_buffers[entry].skb = NULL;
5455 entry = NEXT_TX(entry);
5464 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5465 dma_addr_t mapping, int len, u32 flags,
5468 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5469 int is_end = (mss_and_is_end & 0x1);
5470 u32 mss = (mss_and_is_end >> 1);
5474 flags |= TXD_FLAG_END;
5475 if (flags & TXD_FLAG_VLAN) {
5476 vlan_tag = flags >> 16;
5479 vlan_tag |= (mss << TXD_MSS_SHIFT);
5481 txd->addr_hi = ((u64) mapping >> 32);
5482 txd->addr_lo = ((u64) mapping & 0xffffffff);
5483 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5484 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
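
/* A sketch of how callers pack the mss_and_is_end argument decoded
 * above: bit 0 flags the final descriptor of the frame and the
 * remaining bits carry the MSS (the helper name is illustrative; see
 * the "(i == last) | (mss << 1)" call sites below).
 */
static inline u32 example_pack_mss_and_is_end(u32 mss, int is_end)
{
	return (is_end ? 1 : 0) | (mss << 1);
}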
5487 /* hard_start_xmit for devices that don't have any bugs and
5488 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5490 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5491 struct net_device *dev)
5493 struct tg3 *tp = netdev_priv(dev);
5494 u32 len, entry, base_flags, mss;
5496 struct tg3_napi *tnapi;
5497 struct netdev_queue *txq;
5498 unsigned int i, last;
5501 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5502 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5503 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5506 /* We are running in a BH-disabled context with netif_tx_lock
5507 * and TX reclaim runs via tp->napi.poll inside of a software
5508 * interrupt. Furthermore, IRQ processing runs lockless so we have
5509 * no IRQ context deadlocks to worry about either. Rejoice!
5511 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5512 if (!netif_tx_queue_stopped(txq)) {
5513 netif_tx_stop_queue(txq);
5515 /* This is a hard error, log it. */
5516 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5517 "queue awake!\n", dev->name);
5519 return NETDEV_TX_BUSY;
5522 entry = tnapi->tx_prod;
5525 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5526 int tcp_opt_len, ip_tcp_len;
5529 if (skb_header_cloned(skb) &&
5530 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5535 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5536 hdrlen = skb_headlen(skb) - ETH_HLEN;
5538 struct iphdr *iph = ip_hdr(skb);
5540 tcp_opt_len = tcp_optlen(skb);
5541 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5544 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5545 hdrlen = ip_tcp_len + tcp_opt_len;
5548 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5549 mss |= (hdrlen & 0xc) << 12;
if (hdrlen & 0x10)
5551 base_flags |= 0x00000010;
5552 base_flags |= (hdrlen & 0x3e0) << 5;
5556 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5557 TXD_FLAG_CPU_POST_DMA);
5559 tcp_hdr(skb)->check = 0;
5562 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5563 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5564 #if TG3_VLAN_TAG_USED
5565 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5566 base_flags |= (TXD_FLAG_VLAN |
5567 (vlan_tx_tag_get(skb) << 16));
5570 len = skb_headlen(skb);
5572 /* Queue skb data, a.k.a. the main skb fragment. */
5573 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5574 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5579 tnapi->tx_buffers[entry].skb = skb;
5580 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5582 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5583 !mss && skb->len > ETH_DATA_LEN)
5584 base_flags |= TXD_FLAG_JMB_PKT;
5586 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5587 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5589 entry = NEXT_TX(entry);
5591 /* Now loop through additional data fragments, and queue them. */
5592 if (skb_shinfo(skb)->nr_frags > 0) {
5593 last = skb_shinfo(skb)->nr_frags - 1;
5594 for (i = 0; i <= last; i++) {
5595 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5598 mapping = pci_map_page(tp->pdev,
5601 len, PCI_DMA_TODEVICE);
5602 if (pci_dma_mapping_error(tp->pdev, mapping))
5605 tnapi->tx_buffers[entry].skb = NULL;
5606 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5609 tg3_set_txd(tnapi, entry, mapping, len,
5610 base_flags, (i == last) | (mss << 1));
5612 entry = NEXT_TX(entry);
5616 /* Packets are ready; update the Tx producer index locally and on the card. */
5617 tw32_tx_mbox(tnapi->prodmbox, entry);
5619 tnapi->tx_prod = entry;
5620 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5621 netif_tx_stop_queue(txq);
5622 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5623 netif_tx_wake_queue(txq);
5629 return NETDEV_TX_OK;
5633 entry = tnapi->tx_prod;
5634 tnapi->tx_buffers[entry].skb = NULL;
5635 pci_unmap_single(tp->pdev,
5636 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5639 for (i = 0; i <= last; i++) {
5640 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5641 entry = NEXT_TX(entry);
5643 pci_unmap_page(tp->pdev,
5644 pci_unmap_addr(&tnapi->tx_buffers[entry],
5646 frag->size, PCI_DMA_TODEVICE);
5650 return NETDEV_TX_OK;
5653 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5654 struct net_device *);
5656 /* Use GSO to work around a rare TSO bug that may be triggered when the
5657 * TSO header is greater than 80 bytes.
5659 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5661 struct sk_buff *segs, *nskb;
5662 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5664 /* Estimate the number of fragments in the worst case */
5665 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5666 netif_stop_queue(tp->dev);
5667 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5668 return NETDEV_TX_BUSY;
5670 netif_wake_queue(tp->dev);
5673 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
if (IS_ERR(segs))
5675 goto tg3_tso_bug_end;
5681 tg3_start_xmit_dma_bug(nskb, tp->dev);
5687 return NETDEV_TX_OK;
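
/* The standard pattern for consuming the singly linked segment list
 * returned by skb_gso_segment(), which the elided transmit loop in
 * tg3_tso_bug() follows (sketch; xmit_one is a hypothetical
 * per-segment transmit hook):
 */
static void example_xmit_gso_list(struct sk_buff *segs,
				  void (*xmit_one)(struct sk_buff *))
{
	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;	/* detach before handing it off */
		xmit_one(nskb);
	}
}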
5690 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5691 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5693 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5694 struct net_device *dev)
5696 struct tg3 *tp = netdev_priv(dev);
5697 u32 len, entry, base_flags, mss;
5698 int would_hit_hwbug;
5700 struct tg3_napi *tnapi;
5701 struct netdev_queue *txq;
5702 unsigned int i, last;
5705 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5706 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5707 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5710 /* We are running in a BH-disabled context with netif_tx_lock
5711 * and TX reclaim runs via tp->napi.poll inside of a software
5712 * interrupt. Furthermore, IRQ processing runs lockless so we have
5713 * no IRQ context deadlocks to worry about either. Rejoice!
5715 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5716 if (!netif_tx_queue_stopped(txq)) {
5717 netif_tx_stop_queue(txq);
5719 /* This is a hard error, log it. */
5720 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5721 "queue awake!\n", dev->name);
5723 return NETDEV_TX_BUSY;
5726 entry = tnapi->tx_prod;
5728 if (skb->ip_summed == CHECKSUM_PARTIAL)
5729 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5731 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5733 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5735 if (skb_header_cloned(skb) &&
5736 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5741 tcp_opt_len = tcp_optlen(skb);
5742 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5744 hdr_len = ip_tcp_len + tcp_opt_len;
5745 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5746 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5747 return tg3_tso_bug(tp, skb);
5749 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5750 TXD_FLAG_CPU_POST_DMA);
5754 iph->tot_len = htons(mss + hdr_len);
5755 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5756 tcp_hdr(skb)->check = 0;
5757 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5759 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5764 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5765 mss |= (hdr_len & 0xc) << 12;
if (hdr_len & 0x10)
5767 base_flags |= 0x00000010;
5768 base_flags |= (hdr_len & 0x3e0) << 5;
5769 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5770 mss |= hdr_len << 9;
5771 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5772 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5773 if (tcp_opt_len || iph->ihl > 5) {
5776 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5777 mss |= (tsflags << 11);
5780 if (tcp_opt_len || iph->ihl > 5) {
5783 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5784 base_flags |= tsflags << 12;
5788 #if TG3_VLAN_TAG_USED
5789 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5790 base_flags |= (TXD_FLAG_VLAN |
5791 (vlan_tx_tag_get(skb) << 16));
5794 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5795 !mss && skb->len > ETH_DATA_LEN)
5796 base_flags |= TXD_FLAG_JMB_PKT;
5798 len = skb_headlen(skb);
5800 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5801 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5806 tnapi->tx_buffers[entry].skb = skb;
5807 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5809 would_hit_hwbug = 0;
5811 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5812 would_hit_hwbug = 1;
5814 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5815 tg3_4g_overflow_test(mapping, len))
5816 would_hit_hwbug = 1;
5818 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5819 tg3_40bit_overflow_test(tp, mapping, len))
5820 would_hit_hwbug = 1;
5822 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5823 would_hit_hwbug = 1;
5825 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5826 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5828 entry = NEXT_TX(entry);
5830 /* Now loop through additional data fragments, and queue them. */
5831 if (skb_shinfo(skb)->nr_frags > 0) {
5832 last = skb_shinfo(skb)->nr_frags - 1;
5833 for (i = 0; i <= last; i++) {
5834 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5837 mapping = pci_map_page(tp->pdev,
5840 len, PCI_DMA_TODEVICE);
5842 tnapi->tx_buffers[entry].skb = NULL;
5843 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5845 if (pci_dma_mapping_error(tp->pdev, mapping))
5848 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5850 would_hit_hwbug = 1;
5852 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5853 tg3_4g_overflow_test(mapping, len))
5854 would_hit_hwbug = 1;
5856 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5857 tg3_40bit_overflow_test(tp, mapping, len))
5858 would_hit_hwbug = 1;
5860 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5861 tg3_set_txd(tnapi, entry, mapping, len,
5862 base_flags, (i == last)|(mss << 1));
5864 tg3_set_txd(tnapi, entry, mapping, len,
5865 base_flags, (i == last));
5867 entry = NEXT_TX(entry);
5871 if (would_hit_hwbug) {
5872 u32 last_plus_one = entry;
5875 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5876 start &= (TG3_TX_RING_SIZE - 1);
5878 /* If the workaround fails due to memory/mapping
5879 * failure, silently drop this packet.
5881 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5882 &start, base_flags, mss))
5888 /* Packets are ready, update Tx producer idx local and on card. */
5889 tw32_tx_mbox(tnapi->prodmbox, entry);
5891 tnapi->tx_prod = entry;
5892 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5893 netif_tx_stop_queue(txq);
5894 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5895 netif_tx_wake_queue(txq);
5901 return NETDEV_TX_OK;
5905 entry = tnapi->tx_prod;
5906 tnapi->tx_buffers[entry].skb = NULL;
5907 pci_unmap_single(tp->pdev,
5908 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5911 for (i = 0; i <= last; i++) {
5912 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5913 entry = NEXT_TX(entry);
5915 pci_unmap_page(tp->pdev,
5916 pci_unmap_addr(&tnapi->tx_buffers[entry],
5918 frag->size, PCI_DMA_TODEVICE);
5922 return NETDEV_TX_OK;
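
/* A condensed view of the per-mapping screening applied in
 * tg3_start_xmit_dma_bug() above (sketch; it mirrors the three tests
 * in the shown code):
 */
static inline int example_mapping_needs_workaround(struct tg3 *tp,
						   dma_addr_t mapping,
						   int len)
{
	if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
		return 1;	/* tiny DMA reads trip some chips */

	if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
	    tg3_4g_overflow_test(mapping, len))
		return 1;	/* buffer straddles a 4GB boundary */

	if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
	    tg3_40bit_overflow_test(tp, mapping, len))
		return 1;	/* address exceeds the 40-bit limit */

	return 0;
}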
5925 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5930 if (new_mtu > ETH_DATA_LEN) {
5931 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5932 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5933 ethtool_op_set_tso(dev, 0);
5936 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5938 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5939 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5940 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5944 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5946 struct tg3 *tp = netdev_priv(dev);
5949 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5952 if (!netif_running(dev)) {
5953 /* We'll just catch it later when the
 * device is brought up.
 */
5956 tg3_set_mtu(dev, tp, new_mtu);
5964 tg3_full_lock(tp, 1);
5966 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5968 tg3_set_mtu(dev, tp, new_mtu);
5970 err = tg3_restart_hw(tp, 0);
5973 tg3_netif_start(tp);
5975 tg3_full_unlock(tp);
5983 static void tg3_rx_prodring_free(struct tg3 *tp,
5984 struct tg3_rx_prodring_set *tpr)
5988 if (tpr != &tp->prodring[0]) {
5989 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
5990 i = (i + 1) % TG3_RX_RING_SIZE)
5991 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5994 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5995 for (i = tpr->rx_jmb_cons_idx;
5996 i != tpr->rx_jmb_prod_idx;
5997 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
5998 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6006 for (i = 0; i < TG3_RX_RING_SIZE; i++)
6007 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6010 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6011 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
6012 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6017 /* Initialize rx rings for packet processing.
6019 * The chip has been shut down and the driver detached from
6020 * the networking, so no interrupts or new tx packets will
6021 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
6024 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6025 struct tg3_rx_prodring_set *tpr)
6027 u32 i, rx_pkt_dma_sz;
6029 tpr->rx_std_cons_idx = 0;
6030 tpr->rx_std_prod_idx = 0;
6031 tpr->rx_jmb_cons_idx = 0;
6032 tpr->rx_jmb_prod_idx = 0;
6034 if (tpr != &tp->prodring[0]) {
6035 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
6036 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
6037 memset(&tpr->rx_jmb_buffers[0], 0,
6038 TG3_RX_JMB_BUFF_RING_SIZE);
6042 /* Zero out all descriptors. */
6043 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
6045 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6046 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6047 tp->dev->mtu > ETH_DATA_LEN)
6048 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6049 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6051 /* Initialize invariants of the rings; we only set this
6052 * stuff once. This works because the card does not
6053 * write into the rx buffer posting rings.
6055 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
6056 struct tg3_rx_buffer_desc *rxd;
6058 rxd = &tpr->rx_std[i];
6059 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6060 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6061 rxd->opaque = (RXD_OPAQUE_RING_STD |
6062 (i << RXD_OPAQUE_INDEX_SHIFT));
6065 /* Now allocate fresh SKBs for each rx ring. */
6066 for (i = 0; i < tp->rx_pending; i++) {
6067 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6068 printk(KERN_WARNING PFX
6069 "%s: Using a smaller RX standard ring, "
6070 "only %d out of %d buffers were allocated "
6072 tp->dev->name, i, tp->rx_pending);
6080 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
6083 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6085 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6086 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
6087 struct tg3_rx_buffer_desc *rxd;
6089 rxd = &tpr->rx_jmb[i].std;
6090 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6091 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6093 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6094 (i << RXD_OPAQUE_INDEX_SHIFT));
6097 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6098 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
6100 printk(KERN_WARNING PFX
6101 "%s: Using a smaller RX jumbo ring, "
6102 "only %d out of %d buffers were "
6103 "allocated successfully.\n",
6104 tp->dev->name, i, tp->rx_jumbo_pending);
6107 tp->rx_jumbo_pending = i;
6117 tg3_rx_prodring_free(tp, tpr);
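
/* A sketch of recovering the producer ring and buffer index from the
 * opaque cookie initialized above; this is the inverse of the
 * RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT) encoding and
 * matches the masking done at the top of tg3_rx().
 */
static inline void example_decode_opaque(u32 opaque, u32 *ring, u32 *idx)
{
	*idx = opaque & RXD_OPAQUE_INDEX_MASK;
	*ring = opaque & RXD_OPAQUE_RING_MASK;
}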
6121 static void tg3_rx_prodring_fini(struct tg3 *tp,
6122 struct tg3_rx_prodring_set *tpr)
6124 kfree(tpr->rx_std_buffers);
6125 tpr->rx_std_buffers = NULL;
6126 kfree(tpr->rx_jmb_buffers);
6127 tpr->rx_jmb_buffers = NULL;
6129 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
6130 tpr->rx_std, tpr->rx_std_mapping);
6134 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
6135 tpr->rx_jmb, tpr->rx_jmb_mapping);
6140 static int tg3_rx_prodring_init(struct tg3 *tp,
6141 struct tg3_rx_prodring_set *tpr)
6143 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6144 if (!tpr->rx_std_buffers)
6147 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
6148 &tpr->rx_std_mapping);
6152 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6153 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6155 if (!tpr->rx_jmb_buffers)
6158 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6159 TG3_RX_JUMBO_RING_BYTES,
6160 &tpr->rx_jmb_mapping);
6168 tg3_rx_prodring_fini(tp, tpr);
6172 /* Free up pending packets in all rx/tx rings.
6174 * The chip has been shut down and the driver detached from
6175 * the networking, so no interrupts or new tx packets will
6176 * end up in the driver. tp->{tx,}lock is not held and we are not
6177 * in an interrupt context and thus may sleep.
6179 static void tg3_free_rings(struct tg3 *tp)
6183 for (j = 0; j < tp->irq_cnt; j++) {
6184 struct tg3_napi *tnapi = &tp->napi[j];
6186 if (!tnapi->tx_buffers)
6189 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6190 struct ring_info *txp;
6191 struct sk_buff *skb;
6194 txp = &tnapi->tx_buffers[i];
6202 pci_unmap_single(tp->pdev,
6203 pci_unmap_addr(txp, mapping),
6210 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6211 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6212 pci_unmap_page(tp->pdev,
6213 pci_unmap_addr(txp, mapping),
6214 skb_shinfo(skb)->frags[k].size,
6219 dev_kfree_skb_any(skb);
6222 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6226 /* Initialize tx/rx rings for packet processing.
6228 * The chip has been shut down and the driver detached from
6229 * the networking, so no interrupts or new tx packets will
6230 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
6233 static int tg3_init_rings(struct tg3 *tp)
6237 /* Free up all the SKBs. */
6240 for (i = 0; i < tp->irq_cnt; i++) {
6241 struct tg3_napi *tnapi = &tp->napi[i];
6243 tnapi->last_tag = 0;
6244 tnapi->last_irq_tag = 0;
6245 tnapi->hw_status->status = 0;
6246 tnapi->hw_status->status_tag = 0;
6247 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6252 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6254 tnapi->rx_rcb_ptr = 0;
6256 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6258 if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
6268 * Must not be invoked with interrupt sources disabled and
6269 * the hardware shut down.
6271 static void tg3_free_consistent(struct tg3 *tp)
6275 for (i = 0; i < tp->irq_cnt; i++) {
6276 struct tg3_napi *tnapi = &tp->napi[i];
6278 if (tnapi->tx_ring) {
6279 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6280 tnapi->tx_ring, tnapi->tx_desc_mapping);
6281 tnapi->tx_ring = NULL;
6284 kfree(tnapi->tx_buffers);
6285 tnapi->tx_buffers = NULL;
6287 if (tnapi->rx_rcb) {
6288 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6290 tnapi->rx_rcb_mapping);
6291 tnapi->rx_rcb = NULL;
6294 if (tnapi->hw_status) {
6295 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6297 tnapi->status_mapping);
6298 tnapi->hw_status = NULL;
6303 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6304 tp->hw_stats, tp->stats_mapping);
6305 tp->hw_stats = NULL;
6308 for (i = 0; i < tp->irq_cnt; i++)
6309 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6313 * Must not be invoked with interrupt sources disabled and
6314 * the hardware shut down. Can sleep.
6316 static int tg3_alloc_consistent(struct tg3 *tp)
6320 for (i = 0; i < tp->irq_cnt; i++) {
6321 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6325 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6326 sizeof(struct tg3_hw_stats),
6327 &tp->stats_mapping);
6331 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6333 for (i = 0; i < tp->irq_cnt; i++) {
6334 struct tg3_napi *tnapi = &tp->napi[i];
6335 struct tg3_hw_status *sblk;
6337 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6339 &tnapi->status_mapping);
6340 if (!tnapi->hw_status)
6343 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6344 sblk = tnapi->hw_status;
6346 /* If multivector TSS is enabled, vector 0 does not handle
6347 * tx interrupts. Don't allocate any resources for it.
6349 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6350 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6351 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6354 if (!tnapi->tx_buffers)
6357 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6359 &tnapi->tx_desc_mapping);
6360 if (!tnapi->tx_ring)
6365 * When RSS is enabled, the status block format changes
6366 * slightly. The "rx_jumbo_consumer", "reserved",
6367 * and "rx_mini_consumer" members get mapped to the
6368 * other three rx return ring producer indexes.
6372 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6375 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6378 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6381 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6385 tnapi->prodring = &tp->prodring[i];
6388 * If multivector RSS is enabled, vector 0 does not handle
6389 * rx or tx interrupts. Don't allocate any resources for it.
6391 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6394 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6395 TG3_RX_RCB_RING_BYTES(tp),
6396 &tnapi->rx_rcb_mapping);
6400 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6406 tg3_free_consistent(tp);
6410 #define MAX_WAIT_CNT 1000
6412 /* To stop a block, clear the enable bit and poll until it
6413 * clears. tp->lock is held.
6415 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6420 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6427 /* We can't enable/disable these bits of the
6428 * 5705/5750, just say success.
6441 for (i = 0; i < MAX_WAIT_CNT; i++) {
6444 if ((val & enable_bit) == 0)
6448 if (i == MAX_WAIT_CNT && !silent) {
6449 printk(KERN_ERR PFX "tg3_stop_block timed out, "
6450 "ofs=%lx enable_bit=%x\n",
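
/* The generic shape of the stop-and-poll pattern tg3_stop_block()
 * implements (sketch; the 100us poll interval is illustrative): clear
 * the enable bit, then poll the readback until the hardware
 * acknowledges the stop or we give up.
 */
static int example_stop_and_poll(struct tg3 *tp, unsigned long ofs,
				 u32 enable_bit)
{
	int i;

	tw32(ofs, tr32(ofs) & ~enable_bit);	/* request the stop */

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(ofs) & enable_bit))
			return 0;		/* block has halted */
	}

	return -ENODEV;
}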
6458 /* tp->lock is held. */
6459 static int tg3_abort_hw(struct tg3 *tp, int silent)
6463 tg3_disable_ints(tp);
6465 tp->rx_mode &= ~RX_MODE_ENABLE;
6466 tw32_f(MAC_RX_MODE, tp->rx_mode);
6469 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6470 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6471 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6472 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6473 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6474 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6476 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6477 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6478 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6479 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6480 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6481 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6482 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6484 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6485 tw32_f(MAC_MODE, tp->mac_mode);
6488 tp->tx_mode &= ~TX_MODE_ENABLE;
6489 tw32_f(MAC_TX_MODE, tp->tx_mode);
6491 for (i = 0; i < MAX_WAIT_CNT; i++) {
6493 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6496 if (i >= MAX_WAIT_CNT) {
6497 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
6498 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
6499 tp->dev->name, tr32(MAC_TX_MODE));
6503 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6504 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6505 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6507 tw32(FTQ_RESET, 0xffffffff);
6508 tw32(FTQ_RESET, 0x00000000);
6510 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6511 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6513 for (i = 0; i < tp->irq_cnt; i++) {
6514 struct tg3_napi *tnapi = &tp->napi[i];
6515 if (tnapi->hw_status)
6516 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6519 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6524 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6529 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6530 if (apedata != APE_SEG_SIG_MAGIC)
6533 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6534 if (!(apedata & APE_FW_STATUS_READY))
6537 /* Wait for up to 1 millisecond for APE to service previous event. */
6538 for (i = 0; i < 10; i++) {
6539 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6542 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6544 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6545 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6546 event | APE_EVENT_STATUS_EVENT_PENDING);
6548 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6550 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6556 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6557 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6560 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6565 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6569 case RESET_KIND_INIT:
6570 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6571 APE_HOST_SEG_SIG_MAGIC);
6572 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6573 APE_HOST_SEG_LEN_MAGIC);
6574 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6575 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6576 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6577 APE_HOST_DRIVER_ID_MAGIC);
6578 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6579 APE_HOST_BEHAV_NO_PHYLOCK);
6581 event = APE_EVENT_STATUS_STATE_START;
6583 case RESET_KIND_SHUTDOWN:
6584 /* With the interface we are currently using,
6585 * APE does not track driver state. Wiping
6586 * out the HOST SEGMENT SIGNATURE forces
6587 * the APE to assume OS absent status.
6589 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6591 event = APE_EVENT_STATUS_STATE_UNLOAD;
6593 case RESET_KIND_SUSPEND:
6594 event = APE_EVENT_STATUS_STATE_SUSPEND;
6600 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6602 tg3_ape_send_event(tp, event);
6605 /* tp->lock is held. */
6606 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6608 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6609 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6611 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6613 case RESET_KIND_INIT:
6614 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6618 case RESET_KIND_SHUTDOWN:
6619 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6623 case RESET_KIND_SUSPEND:
6624 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6633 if (kind == RESET_KIND_INIT ||
6634 kind == RESET_KIND_SUSPEND)
6635 tg3_ape_driver_state_change(tp, kind);
6638 /* tp->lock is held. */
6639 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6641 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6643 case RESET_KIND_INIT:
6644 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6645 DRV_STATE_START_DONE);
6648 case RESET_KIND_SHUTDOWN:
6649 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6650 DRV_STATE_UNLOAD_DONE);
6658 if (kind == RESET_KIND_SHUTDOWN)
6659 tg3_ape_driver_state_change(tp, kind);
6662 /* tp->lock is held. */
6663 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6665 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6667 case RESET_KIND_INIT:
6668 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6672 case RESET_KIND_SHUTDOWN:
6673 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6677 case RESET_KIND_SUSPEND:
6678 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6688 static int tg3_poll_fw(struct tg3 *tp)
6693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6694 /* Wait up to 20ms for init done. */
6695 for (i = 0; i < 200; i++) {
6696 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6703 /* Wait for firmware initialization to complete. */
6704 for (i = 0; i < 100000; i++) {
6705 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6706 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6711 /* Chip might not be fitted with firmware. Some Sun onboard
6712 * parts are configured like that. So don't signal the timeout
6713 * of the above loop as an error, but do report the lack of
6714 * running firmware once.
6717 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6718 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6720 printk(KERN_INFO PFX "%s: No firmware running.\n",
6724 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6725 /* The 57765 A0 needs a little more
6726 * time to do some important work.
6734 /* Save PCI command register before chip reset */
6735 static void tg3_save_pci_state(struct tg3 *tp)
6737 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6740 /* Restore PCI state after chip reset */
6741 static void tg3_restore_pci_state(struct tg3 *tp)
6745 /* Re-enable indirect register accesses. */
6746 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6747 tp->misc_host_ctrl);
6749 /* Set MAX PCI retry to zero. */
6750 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6751 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6752 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6753 val |= PCISTATE_RETRY_SAME_DMA;
6754 /* Allow reads and writes to the APE register and memory space. */
6755 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6756 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6757 PCISTATE_ALLOW_APE_SHMEM_WR;
6758 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6760 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6762 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6763 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6764 pcie_set_readrq(tp->pdev, 4096);
6766 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6767 tp->pci_cacheline_sz);
6768 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6773 /* Make sure the PCI-X relaxed ordering bit is clear. */
6774 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6777 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6779 pcix_cmd &= ~PCI_X_CMD_ERO;
6780 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6784 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6786 /* Chip reset on 5780 will reset the MSI enable bit,
6787 * so we need to restore it.
6789 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6792 pci_read_config_word(tp->pdev,
6793 tp->msi_cap + PCI_MSI_FLAGS,
6795 pci_write_config_word(tp->pdev,
6796 tp->msi_cap + PCI_MSI_FLAGS,
6797 ctrl | PCI_MSI_FLAGS_ENABLE);
6798 val = tr32(MSGINT_MODE);
6799 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6804 static void tg3_stop_fw(struct tg3 *);
6806 /* tp->lock is held. */
6807 static int tg3_chip_reset(struct tg3 *tp)
6810 void (*write_op)(struct tg3 *, u32, u32);
6815 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6817 /* No matching tg3_nvram_unlock() after this because
6818 * chip reset below will undo the nvram lock.
6820 tp->nvram_lock_cnt = 0;
6822 /* GRC_MISC_CFG core clock reset will clear the memory
6823 * enable bit in PCI register 4 and the MSI enable bit
6824 * on some chips, so we save relevant registers here.
6826 tg3_save_pci_state(tp);
6828 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6829 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6830 tw32(GRC_FASTBOOT_PC, 0);
6833 * We must avoid the readl() that normally takes place.
6834 * It locks machines, causes machine checks, and other
6835 * fun things. So we temporarily disable the 5701
6836 * hardware workaround while we do the reset.
6838 write_op = tp->write32;
6839 if (write_op == tg3_write_flush_reg32)
6840 tp->write32 = tg3_write32;
6842 /* Prevent the irq handler from reading or writing PCI registers
6843 * during chip reset when the memory enable bit in the PCI command
6844 * register may be cleared. The chip does not generate interrupts
6845 * at this time, but the irq handler may still be called due to irq
6846 * sharing or irqpoll.
6848 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6849 for (i = 0; i < tp->irq_cnt; i++) {
6850 struct tg3_napi *tnapi = &tp->napi[i];
6851 if (tnapi->hw_status) {
6852 tnapi->hw_status->status = 0;
6853 tnapi->hw_status->status_tag = 0;
6855 tnapi->last_tag = 0;
6856 tnapi->last_irq_tag = 0;
6860 for (i = 0; i < tp->irq_cnt; i++)
6861 synchronize_irq(tp->napi[i].irq_vec);
6863 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6864 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6865 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6869 val = GRC_MISC_CFG_CORECLK_RESET;
6871 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6872 if (tr32(0x7e2c) == 0x60) {
6875 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6876 tw32(GRC_MISC_CFG, (1 << 29));
6881 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6882 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6883 tw32(GRC_VCPU_EXT_CTRL,
6884 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6887 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6888 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6889 tw32(GRC_MISC_CFG, val);
6891 /* restore 5701 hardware bug workaround write method */
6892 tp->write32 = write_op;
6894 /* Unfortunately, we have to delay before the PCI read back.
6895 * Some 575X chips even will not respond to a PCI cfg access
6896 * when the reset command is given to the chip.
6898 * How do these hardware designers expect things to work
6899 * properly if the PCI write is posted for a long period
6900 * of time? It is always necessary to have some method by
6901 * which a register read back can occur to push the write
6902 * out which does the reset.
6904 * For most tg3 variants the trick below has worked.
6909 /* Flush PCI posted writes. The normal MMIO registers
6910 * are inaccessible at this time so this is the only
6911 * way to do this reliably (actually, this is no longer
6912 * the case, see above). I tried to use indirect
6913 * register read/write but this upset some 5701 variants.
6915 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6919 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6922 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6926 /* Wait for link training to complete. */
6927 for (i = 0; i < 5000; i++)
6930 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6931 pci_write_config_dword(tp->pdev, 0xc4,
6932 cfg_val | (1 << 15));
6935 /* Clear the "no snoop" and "relaxed ordering" bits. */
6936 pci_read_config_word(tp->pdev,
6937 tp->pcie_cap + PCI_EXP_DEVCTL,
6939 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6940 PCI_EXP_DEVCTL_NOSNOOP_EN);
6942 * Older PCIe devices only support the 128 byte
6943 * MPS setting. Enforce the restriction.
6945 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6946 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6947 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, 4096);
6954 /* Clear error status */
6955 pci_write_config_word(tp->pdev,
6956 tp->pcie_cap + PCI_EXP_DEVSTA,
6957 PCI_EXP_DEVSTA_CED |
6958 PCI_EXP_DEVSTA_NFED |
6959 PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}
6963 tg3_restore_pci_state(tp);
6965 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6969 val = tr32(MEMARB_MODE);
6970 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6972 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}
6977 tw32(GRC_MODE, tp->grc_mode);
6979 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);
		tw32(0xc4, val | (1 << 15));
	}
6985 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6987 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6988 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6989 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}
6993 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6994 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6995 tw32_f(MAC_MODE, tp->mac_mode);
6996 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6997 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6998 tw32_f(MAC_MODE, tp->mac_mode);
6999 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7000 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7001 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7002 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);
7008 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
	err = tg3_poll_fw(tp);
	if (err)
		return err;
7016 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7019 phy_addr = tp->phy_addr;
7020 tp->phy_addr = TG3_PHY_PCIE_ADDR;
7022 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
7023 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
7024 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
7025 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
7026 TG3_PCIEPHY_TX0CTRL1_NB_EN;
7027 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
7030 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
7031 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
7032 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
7033 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
7034 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
		tp->phy_addr = phy_addr;
	}
7040 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7041 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7042 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7043 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7044 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
		val = tr32(0x7c00);
		tw32(0x7c00, val | (1 << 25));
	}
7050 /* Reprobe ASF enable state. */
7051 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7052 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7053 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

7057 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7058 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7059 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7060 tp->last_event_jiffies = jiffies;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}
}
7069 /* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
7072 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7073 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7074 /* Wait for RX cpu to ACK the previous event. */
7075 tg3_wait_for_event_ack(tp);
7077 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7079 tg3_generate_fw_event(tp);
7081 /* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
7086 /* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_write_sig_pre_reset(tp, kind);
7095 tg3_abort_hw(tp, silent);
7096 err = tg3_chip_reset(tp);
7098 __tg3_set_mac_addr(tp, 0);
7100 tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
7109 #define RX_CPU_SCRATCH_BASE 0x30000
7110 #define RX_CPU_SCRATCH_SIZE 0x04000
7111 #define TX_CPU_SCRATCH_BASE 0x34000
7112 #define TX_CPU_SCRATCH_SIZE 0x04000
7114 /* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

7119 BUG_ON(offset == TX_CPU_BASE &&
7120 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7122 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7123 u32 val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}
7156 /* Clear firmware's nvram arbitration. */
7157 if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);

	return 0;
}

struct fw_info {
7163 unsigned int fw_base;
7164 unsigned int fw_len;
	const __be32 *fw_data;
};
7168 /* tp->lock is held. */
7169 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);
7175 if (cpu_base == TX_CPU_BASE &&
7176 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7177 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
7178 "TX cpu firmware on %s which is 5705.\n",
7183 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7184 write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;
7188 /* It is possible that bootcode is still loading at this point.
7189 * Get the nvram lock first before halting the cpu.
7191 lock_err = tg3_nvram_lock(tp);
7192 err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;
7198 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7199 write_op(tp, cpu_scratch_base + i, 0);
7200 tw32(cpu_base + CPU_STATE, 0xffffffff);
7201 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7202 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
7214 /* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;
7223 /* Firmware blob starts with version numbers, followed by
7224 start address and length. We are setting complete length.
7225 length = end_address_of_bss - start_address_of_text.
7226 Remainder is the blob to be loaded contiguously
7227 from start address. */
7229 info.fw_base = be32_to_cpu(fw_data[1]);
7230 info.fw_len = tp->fw->size - 12;
7231 info.fw_data = &fw_data[3];
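
	/* Illustrative layout of the firmware blob assumed by the parsing
	 * above (indices are __be32 words):
	 *	fw_data[0]   version numbers
	 *	fw_data[1]   start address of text
	 *	fw_data[2]   full length (end_address_of_bss - start_address_of_text)
	 *	fw_data[3..] image, loaded contiguously at the start address
	 */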
	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;
	/* Now start up only the RX cpu. */
7246 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7247 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       info.fw_base);
		return -ENODEV;
	}
7264 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);

	return 0;
}
7270 /* 5705 needs a special version of the TSO firmware. */
7272 /* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;
7283 fw_data = (void *)tp->fw->data;
7285 /* Firmware blob starts with version numbers, followed by
7286 start address and length. We are setting complete length.
7287 length = end_address_of_bss - start_address_of_text.
7288 Remainder is the blob to be loaded contiguously
7289 from start address. */
7291 info.fw_base = be32_to_cpu(fw_data[1]);
7292 cpu_scratch_size = tp->fw_len;
7293 info.fw_len = tp->fw->size - 12;
7294 info.fw_data = &fw_data[3];
7296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7297 cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}
7305 err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;
	/* Now start up the cpu. */
7312 tw32(cpu_base + CPU_STATE, 0xffffffff);
7313 tw32_f(cpu_base + CPU_PC, info.fw_base);
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.fw_base);
		return -ENODEV;
	}
7330 tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);

	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
7338 struct tg3 *tp = netdev_priv(dev);
7339 struct sockaddr *addr = p;
7340 int err = 0, skip_mac_1 = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;
7345 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (!netif_running(dev))
		return 0;
7350 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7351 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7353 addr0_high = tr32(MAC_ADDR_0_HIGH);
7354 addr0_low = tr32(MAC_ADDR_0_LOW);
7355 addr1_high = tr32(MAC_ADDR_1_HIGH);
7356 addr1_low = tr32(MAC_ADDR_1_LOW);
7358 /* Skip MAC addr 1 if ASF is using it. */
7359 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
7363 spin_lock_bh(&tp->lock);
7364 __tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
7370 /* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
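
/* For reference, a sketch of the BDINFO block this helper programs in
 * NIC SRAM (offsets assumed from the TG3_BDINFO_* usage above; each
 * block is TG3_BDINFO_SIZE bytes):
 *
 *	+0x0	TG3_BDINFO_HOST_ADDR	64-bit host DMA address (hi/lo)
 *	+0x8	TG3_BDINFO_MAXLEN_FLAGS	(max len << 16) | ring flags
 *	+0xc	TG3_BDINFO_NIC_ADDR	descriptor location in NIC SRAM
 */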
7391 static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

7396 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7397 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7398 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7399 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}
7406 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
7407 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7408 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7409 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}
7416 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7417 u32 val = ec->stats_block_coalesce_usecs;
7419 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7420 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

7431 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7432 tw32(reg, ec->rx_coalesce_usecs);
7433 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7434 tw32(reg, ec->rx_max_coalesced_frames);
7435 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7436 tw32(reg, ec->rx_max_coalesced_frames_irq);
7438 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7439 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7440 tw32(reg, ec->tx_coalesce_usecs);
7441 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7442 tw32(reg, ec->tx_max_coalesced_frames);
7443 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}
7448 for (; i < tp->irq_max - 1; i++) {
7449 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7450 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7451 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7453 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7454 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7455 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
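
/* Sketch of how these knobs are typically exercised from user space
 * (hypothetical device name and values):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * ethtool fills a struct ethtool_coalesce and the driver's set_coalesce
 * hook ends up calling __tg3_set_coalesce(tp, ec) with it.
 */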
7461 /* tp->lock is held. */
7462 static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
7466 struct tg3_napi *tnapi = &tp->napi[0];
7468 /* Disable all transmit rings but the first. */
7469 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7470 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7471 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7472 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7474 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7476 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7477 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7478 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7479 BDINFO_FLAGS_DISABLED);
7482 /* Disable all receive return rings but the first. */
7483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7484 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7485 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7486 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7487 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7489 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7491 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7493 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7494 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7495 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7496 BDINFO_FLAGS_DISABLED);
7498 /* Disable interrupts */
7499 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7501 /* Zero mailbox registers. */
7502 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7503 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7504 tp->napi[i].tx_prod = 0;
7505 tp->napi[i].tx_cons = 0;
7506 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7507 tw32_mailbox(tp->napi[i].prodmbox, 0);
7508 tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
		}
		if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}
7520 /* Make sure the NIC-based send BD rings are disabled. */
7521 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7522 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7523 for (i = 0; i < 16; i++)
7524 tw32_tx_mbox(mbox + i * 8, 0);
7527 txrcb = NIC_SRAM_SEND_RCB;
7528 rxrcb = NIC_SRAM_RCV_RET_RCB;
7530 /* Clear status block in ram. */
7531 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7533 /* Set status block DMA address */
7534 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7535 ((u64) tnapi->status_mapping >> 32));
7536 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7537 ((u64) tnapi->status_mapping & 0xffffffff));
7539 if (tnapi->tx_ring) {
7540 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7541 (TG3_TX_RING_SIZE <<
7542 BDINFO_FLAGS_MAXLEN_SHIFT),
7543 NIC_SRAM_TX_BUFFER_DESC);
7544 txrcb += TG3_BDINFO_SIZE;
7547 if (tnapi->rx_rcb) {
7548 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7549 (TG3_RX_RCB_RING_SIZE(tp) <<
7550 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7551 rxrcb += TG3_BDINFO_SIZE;
7554 stblk = HOSTCC_STATBLCK_RING1;
7556 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7557 u64 mapping = (u64)tnapi->status_mapping;
7558 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7559 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7561 /* Clear status block in ram. */
7562 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7564 if (tnapi->tx_ring) {
7565 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7566 (TG3_TX_RING_SIZE <<
7567 BDINFO_FLAGS_MAXLEN_SHIFT),
7568 NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       (TG3_RX_RCB_RING_SIZE(tp) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8;
	}
}
7581 /* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
7586 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7588 tg3_disable_ints(tp);
7592 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
		tg3_abort_hw(tp, 1);
	}

	if (reset_phy &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;
7606 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7608 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7609 val = tr32(TG3_CPMU_CTRL);
7610 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7611 tw32(TG3_CPMU_CTRL, val);
7613 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7614 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7615 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7616 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7618 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7619 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7620 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7621 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7623 val = tr32(TG3_CPMU_HST_ACC);
7624 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7625 val |= CPMU_HST_ACC_MACCLK_6_25;
7626 tw32(TG3_CPMU_HST_ACC, val);
7629 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7630 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7631 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7632 PCIE_PWR_MGMT_L1_THRESH_4MS;
7633 tw32(PCIE_PWR_MGMT_THRESH, val);
7635 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7636 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7638 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7640 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7641 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7644 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7645 u32 grc_mode = tr32(GRC_MODE);
7647 /* Access the lower 1K of PL PCIE block registers. */
7648 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7649 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7651 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7652 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7653 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7655 tw32(GRC_MODE, grc_mode);
7658 /* This works around an issue with Athlon chipsets on
7659 * B3 tigon3 silicon. This bit has no effect on any
7660 * other revision. But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
7663 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7664 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7665 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7666 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7669 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7670 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7671 val = tr32(TG3PCI_PCISTATE);
7672 val |= PCISTATE_RETRY_SAME_DMA;
7673 tw32(TG3PCI_PCISTATE, val);
7676 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7677 /* Allow reads and writes to the
		 * APE register and memory space.
		 */
7680 val = tr32(TG3PCI_PCISTATE);
7681 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7682 PCISTATE_ALLOW_APE_SHMEM_WR;
7683 tw32(TG3PCI_PCISTATE, val);
7686 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7687 /* Enable some hw fixes. */
7688 val = tr32(TG3PCI_MSI_DATA);
7689 val |= (1 << 26) | (1 << 28) | (1 << 29);
7690 tw32(TG3PCI_MSI_DATA, val);
7693 /* Descriptor ring init may make accesses to the
7694 * NIC SRAM area to setup the TX descriptors, so we
7695 * can only do this after the hardware has been
7696 * successfully reset.
	err = tg3_init_rings(tp);
	if (err)
		return err;
7702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7703 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7704 val = tr32(TG3PCI_DMA_RW_CTRL) &
7705 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7706 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7707 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7708 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7709 /* This value is determined during the probe time DMA
7710 * engine test, tg3_test_dma.
7712 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7715 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7716 GRC_MODE_4X_NIC_SEND_RINGS |
7717 GRC_MODE_NO_TX_PHDR_CSUM |
7718 GRC_MODE_NO_RX_PHDR_CSUM);
7719 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
7727 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
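
	/* For reference (a sketch, not driver code): the software TX
	 * pseudo-header checksum mentioned above is seeded by the core
	 * stack along these lines for TCP over IPv4:
	 *
	 *	th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
	 *				       tcp_len, IPPROTO_TCP, 0);
	 */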
	/* Setup the timer prescaler register.  Clock is always 66 MHz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~GRC_MISC_CFG_PRESCALAR_MASK;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
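
	/* Presumably 66 MHz / (65 + 1) gives a 1 MHz tick for the general
	 * purpose timer (illustrative arithmetic, not from the original
	 * sources).
	 */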
7739 /* Initialize MBUF/DESC pool. */
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		/* Do nothing. */
7742 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7743 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7745 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7747 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7748 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7749 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7754 fw_len = tp->fw_len;
7755 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7756 tw32(BUFMGR_MB_POOL_ADDR,
7757 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7758 tw32(BUFMGR_MB_POOL_SIZE,
7759 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7762 if (tp->dev->mtu <= ETH_DATA_LEN) {
7763 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7764 tp->bufmgr_config.mbuf_read_dma_low_water);
7765 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7766 tp->bufmgr_config.mbuf_mac_rx_low_water);
7767 tw32(BUFMGR_MB_HIGH_WATER,
7768 tp->bufmgr_config.mbuf_high_water);
7770 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7771 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7772 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7773 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7774 tw32(BUFMGR_MB_HIGH_WATER,
7775 tp->bufmgr_config.mbuf_high_water_jumbo);
7777 tw32(BUFMGR_DMA_LOW_WATER,
7778 tp->bufmgr_config.dma_low_water);
7779 tw32(BUFMGR_DMA_HIGH_WATER,
7780 tp->bufmgr_config.dma_high_water);
7782 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7783 for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
		       tp->dev->name);
		return -ENODEV;
	}
7794 /* Setup replenish threshold. */
	val = tp->rx_pending / 8;
	if (val == 0)
		val = 1;
	else if (val > tp->rx_std_max_post)
		val = tp->rx_std_max_post;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
			tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

		if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
			val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
	}
7808 tw32(RCVBDI_STD_THRESH, val);
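
	/* Example (illustrative): with a default of 200 pending standard
	 * rx buffers, the replenish threshold programmed above works out
	 * to 200 / 8 = 25 buffers.
	 */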
7810 /* Initialize TG3_BDINFO's at:
7811 * RCVDBDI_STD_BD: standard eth size rx ring
7812 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7813 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7816 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7817 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7818 * ring attribute flags
7819 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7821 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7822 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
7827 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7828 ((u64) tpr->rx_std_mapping >> 32));
7829 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7830 ((u64) tpr->rx_std_mapping & 0xffffffff));
7831 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7832 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7833 NIC_SRAM_RX_BUFFER_DESC);
7835 /* Disable the mini ring */
7836 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7837 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7838 BDINFO_FLAGS_DISABLED);
7840 /* Program the jumbo buffer descriptor ring control
7841 * blocks on those devices that have them.
7843 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7844 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7845 /* Setup replenish threshold. */
7846 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7848 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7849 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7850 ((u64) tpr->rx_jmb_mapping >> 32));
7851 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7852 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7853 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7854 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7855 BDINFO_FLAGS_USE_EXT_RECV);
7856 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7857 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7858 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7860 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7861 BDINFO_FLAGS_DISABLED);
7864 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7865 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7866 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
		      (RX_STD_MAX_SIZE << 2);
	else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
	else
		val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7873 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7875 tpr->rx_std_prod_idx = tp->rx_pending;
7876 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7878 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7879 tp->rx_jumbo_pending : 0;
7880 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7884 tw32(STD_REPLENISH_LWM, 32);
7885 tw32(JMB_REPLENISH_LWM, 16);
7888 tg3_rings_reset(tp);
7890 /* Initialize MAC address and backoff seed. */
7891 __tg3_set_mac_addr(tp, 0);
7893 /* MTU + ethernet header + FCS + optional VLAN tag */
7894 tw32(MAC_RX_MTU_SIZE,
7895 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
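
	/* Example (illustrative): a standard 1500 byte MTU programs
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
	 * bytes as the largest accepted frame.
	 */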
7897 /* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
7900 tw32(MAC_TX_LENGTHS,
7901 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7902 (6 << TX_LENGTHS_IPG_SHIFT) |
7903 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7905 /* Receive rules. */
7906 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7907 tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate RDMAC_MODE setting early; we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
7912 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7913 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7914 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7915 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7916 RDMAC_MODE_LNGREAD_ENAB);
7918 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7919 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7920 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7921 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7922 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7923 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7925 /* If statement applies to 5705 and 5750 PCI devices only */
7926 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7927 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7928 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7929 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7930 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7931 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7932 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7933 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7934 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7938 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7939 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7941 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7942 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7944 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7945 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7946 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7947 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7949 /* Receive/send statistics. */
7950 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7951 val = tr32(RCVLPC_STATS_ENABLE);
7952 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7953 tw32(RCVLPC_STATS_ENABLE, val);
7954 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7955 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7956 val = tr32(RCVLPC_STATS_ENABLE);
7957 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7958 tw32(RCVLPC_STATS_ENABLE, val);
7960 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7962 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7963 tw32(SNDDATAI_STATSENAB, 0xffffff);
7964 tw32(SNDDATAI_STATSCTRL,
7965 (SNDDATAI_SCTRL_ENABLE |
7966 SNDDATAI_SCTRL_FASTUPD));
7968 /* Setup host coalescing engine. */
7969 tw32(HOSTCC_MODE, 0);
7970 for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}
7976 __tg3_set_coalesce(tp, &tp->coal);
7978 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7979 /* Status/statistics block address. See tg3_timer,
7980 * the tg3_periodic_fetch_stats call there, and
7981 * tg3_get_stats to see how this works for 5705/5750 chips.
7983 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7984 ((u64) tp->stats_mapping >> 32));
7985 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7986 ((u64) tp->stats_mapping & 0xffffffff));
7987 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7989 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7991 /* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}
8000 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8002 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8003 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8004 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8005 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8007 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8008 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
8009 /* reset to prevent losing 1st rx packet intermittently */
8010 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8014 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
8018 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8019 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8020 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8021 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8022 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8023 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8024 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8027 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8028 * If TG3_FLG2_IS_NIC is zero, we should read the
8029 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
		u32 gpio_mask;

8036 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8037 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8038 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8041 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8042 GRC_LCLCTRL_GPIO_OUTPUT3;
8044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8045 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8047 tp->grc_local_ctrl &= ~gpio_mask;
8048 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8050 /* GPIO1 must be driven high for eeprom write protect */
8051 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8052 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8053 GRC_LCLCTRL_GPIO_OUTPUT1);
8055 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8058 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
8059 val = tr32(MSGINT_MODE);
8060 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8061 tw32(MSGINT_MODE, val);
8064 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}
8069 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8070 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8071 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8072 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8073 WDMAC_MODE_LNGREAD_ENAB);
8075 /* If statement applies to 5705 and 5750 PCI devices only */
8076 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8077 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8079 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8080 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
			   !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}
8090 /* Enable host coalescing bug fix */
8091 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8092 val |= WDMAC_MODE_STATUS_TAG_FIX;
8094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8095 val |= WDMAC_MODE_BURST_ALL_DATA;
8097 tw32_f(WDMAC_MODE, val);
	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
8105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8106 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8107 pcix_cmd |= PCI_X_CMD_READ_2K;
8108 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8109 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8110 pcix_cmd |= PCI_X_CMD_READ_2K;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
8116 tw32_f(RDMAC_MODE, rdmac_mode);
8119 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8120 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8121 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8129 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8130 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8131 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
8132 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8133 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8134 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8135 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8136 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8137 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8138 tw32(SNDBDI_MODE, val);
8139 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8141 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}
8147 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}
8153 tp->tx_mode = TX_MODE_ENABLE;
8154 tw32_f(MAC_TX_MODE, tp->tx_mode);
8157 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8158 u32 reg = MAC_RSS_INDIR_TBL_0;
8159 u8 *ent = (u8 *)&val;
8161 /* Setup the indirection table */
8162 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8163 int idx = i % sizeof(val);
8165 ent[idx] = i % (tp->irq_cnt - 1);
			if (idx == sizeof(val) - 1) {
				tw32(reg, val);
				reg += 4;
			}
		}
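
		/* Example (illustrative): with irq_cnt = 5, i.e. four rx
		 * rings, the entries written above cycle 0,1,2,3,0,1,...
		 * across all TG3_RSS_INDIR_TBL_SIZE slots, spreading hash
		 * buckets evenly over the rx return rings.
		 */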
8172 /* Setup the "secret" hash key. */
8173 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8174 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8175 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8176 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8177 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8178 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8179 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8180 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8181 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8182 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8185 tp->rx_mode = RX_MODE_ENABLE;
8186 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8187 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8189 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8190 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8191 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8192 RX_MODE_RSS_IPV6_HASH_EN |
8193 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8194 RX_MODE_RSS_IPV4_HASH_EN |
8195 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8197 tw32_f(MAC_RX_MODE, tp->rx_mode);
8200 tw32(MAC_LED_CTRL, tp->led_ctrl);
8202 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8203 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8204 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8207 tw32_f(MAC_RX_MODE, tp->rx_mode);
8210 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8211 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8212 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
8213 /* Set drive transmission level to 1.2V */
8214 /* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
8220 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8221 tw32(MAC_SERDES_CFG, 0x616000);
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8234 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8235 /* Use hardware link auto-negotiation */
8236 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8239 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
8244 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8245 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8246 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8247 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8250 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8251 if (tp->link_config.phy_is_low_power) {
8252 tp->link_config.phy_is_low_power = 0;
8253 tp->link_config.speed = tp->link_config.orig_speed;
8254 tp->link_config.duplex = tp->link_config.orig_duplex;
			tp->link_config.autoneg = tp->link_config.orig_autoneg;
		}

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;
8262 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
		    !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
			u32 tmp;

8266 /* Clear CRC stats. */
8267 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8268 tg3_writephy(tp, MII_TG3_TEST1,
8269 tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, 0x14, &tmp);
			}
		}
	}
8275 __tg3_set_rx_mode(tp->dev);
8277 /* Initialize receive rules. */
8278 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8279 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8280 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8281 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	}
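
	/* Example (illustrative): a 5705-class part with ASF enabled has
	 * limit = 8 - 4 = 4, so the switch enters at case 4 and clears
	 * nothing, leaving rules 3..0 to the firmware.
	 */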
8326 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8327 /* Write our heartbeat update interval to APE. */
8328 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8329 APE_HOST_HEARTBEAT_INT_DISABLE);
	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
8336 /* Called at device open time to get the chip ready for
8337 * packet processing. Invoked with tp->lock held.
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
8348 #define TG3_STAT_ADD32(PSTAT, REG) \
8349 do { u32 __val = tr32(REG); \
8350 (PSTAT)->low += __val; \
8351 if ((PSTAT)->low < __val) \
	     (PSTAT)->high += 1; \
} while (0)
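
/* Example (illustrative): TG3_STAT_ADD32 folds a 32-bit clear-on-read
 * hardware counter into a 64-bit software counter.  Unsigned overflow
 * of the low word detects wraparound:
 *
 *	low = 0xfffffff0, __val = 0x20
 *	low += __val  ->  0x00000010, which is < __val,
 *	so high is incremented to propagate the carry.
 */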
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;
8362 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8363 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8364 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8365 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8366 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8367 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8368 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8369 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8370 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8371 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8372 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8373 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8374 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8376 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8377 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8378 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8379 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8380 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8381 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8382 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8383 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8384 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8385 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8386 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8387 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8388 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8389 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8391 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8392 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync)
		goto restart_timer;
8403 spin_lock(&tp->lock);
8405 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8406 /* All of this garbage is because when using non-tagged
8407 * IRQ status the mailbox/status_block protocol the chip
8408 * uses with the cpu is race prone.
8410 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8411 tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}
8418 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8419 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8420 spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}
8426 /* This part only runs once per second. */
8427 if (!--tp->timer_counter) {
8428 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8429 tg3_periodic_fetch_stats(tp);
		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);
		tp->timer_counter = tp->timer_multiplier;
	}
8476 /* Heartbeat is only sent once every 2 seconds.
8478 * The heartbeat is to tell the ASF firmware that the host
8479 * driver is still alive. In the event that the OS crashes,
8480 * ASF needs to reset the hardware to free up the FIFO space
8481 * that may be filled with rx packets destined for the host.
8482 * If the FIFO is full, ASF will no longer function properly.
8484 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same effect.
	 *
8488 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8489 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
8493 if (!--tp->asf_counter) {
8494 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8495 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8496 tg3_wait_for_event_ack(tp);
8498 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8499 FWCMD_NICDRV_ALIVE3);
8500 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8501 /* 5 seconds timeout */
8502 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}
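
	/* Illustrative arithmetic: with timer_offset = HZ / 10 (the
	 * non-tagged-status default set up in tg3_open()), asf_multiplier
	 * is (HZ / timer_offset) * 2 = 20 ticks, i.e. the 2 second
	 * heartbeat interval described above.
	 */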
	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		fn = tg3_msi;
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			fn = tg3_msi_1shot;
		flags = IRQF_SAMPLE_RANDOM;
	} else {
		fn = tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;
	if (!netif_running(dev))
		return -ENODEV;
8556 tg3_disable_ints(tp);
8558 free_irq(tnapi->irq_vec, tnapi);
	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
8562 * observable way to know whether the interrupt was delivered.
8564 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8565 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8566 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8567 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}
8571 err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;
8576 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8577 tg3_enable_ints(tp);
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);
8582 for (i = 0; i < 5; i++) {
8583 u32 int_mbox, misc_host_ctrl;
8585 int_mbox = tr32_mailbox(tnapi->int_mbox);
8586 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8588 if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}
8597 tg3_disable_ints(tp);
8599 free_irq(tnapi->irq_vec, tnapi);
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
8607 /* Reenable MSI one shot mode. */
8608 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8609 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8610 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8611 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
8620 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;
	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
8634 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8635 pci_write_config_word(tp->pdev, PCI_COMMAND,
8636 pci_cmd & ~PCI_COMMAND_SERR);
8638 err = tg3_test_interrupt(tp);
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;
8649 /* MSI test failed, go back to INTx mode */
8650 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8651 "switching to INTx mode. Please report this failure to "
8652 "the PCI maintainer and include system chipset information.\n",
8655 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8657 pci_disable_msi(tp->pdev);
8659 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;
8665 /* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
8668 tg3_full_lock(tp, 1);
8670 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8671 err = tg3_init_hw(tp, 1);
8673 tg3_full_unlock(tp);
	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;
8685 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8686 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
		       tp->dev->name, tp->fw_needed);
		return -ENOENT;
	}
8691 fw_data = (void *)tp->fw->data;
8693 /* Firmware blob starts with version numbers, followed by
8694 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
8698 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8699 if (tp->fw_len < (tp->fw->size - 12)) {
8700 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
8701 tp->dev->name, tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}
8707 /* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
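
/* Example (illustrative): for a 2048 byte blob the loadable image is
 * 2048 - 12 = 2036 bytes (three __be32 header words), so an advertised
 * fw_len smaller than 2036 is rejected as bogus above.
 */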
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fall back to the simpler MSI mode. */
		return false;
	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
8726 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
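
	/* Example (illustrative): with 4 online CPUs and irq_max >= 5 this
	 * requests 5 vectors: vector 0 for link and error events, vectors
	 * 1-4 each serving one rx ring.
	 */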
8728 for (i = 0; i < tp->irq_max; i++) {
8729 msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}
	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc != 0) {
		if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
			return false;
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		printk(KERN_NOTICE
		       "%s: Requested %d MSI-X vectors, received %d\n",
		       tp->dev->name, tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}
8745 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8747 for (i = 0; i < tp->irq_max; i++)
8748 tp->napi[i].irq_vec = msix_ent[i].vector;
8750 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
8751 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
		tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
	} else
		tp->dev->real_num_tx_queues = 1;

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
8761 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8762 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8763 /* All MSI supporting chips should support tagged
8764 * status. Assert that this is the case.
		printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
		       "Not using MSI.\n", tp->dev->name);
		goto defcfg;
	}
8771 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8772 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8773 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8774 pci_enable_msi(tp->pdev) == 0)
8775 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8777 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8778 u32 msi_mode = tr32(MSGINT_MODE);
8779 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8780 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8781 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
defcfg:
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
		tp->irq_cnt = 1;
8786 tp->napi[0].irq_vec = tp->pdev->irq;
		tp->dev->real_num_tx_queues = 1;
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
8793 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8794 pci_disable_msix(tp->pdev);
8795 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8796 pci_disable_msi(tp->pdev);
8797 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
	tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
}
8801 static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;
8806 if (tp->fw_needed) {
8807 err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			printk(KERN_WARNING "%s: TSO capability disabled.\n",
			       tp->dev->name);
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
			printk(KERN_NOTICE "%s: TSO capability restored.\n",
			       tp->dev->name);
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		}
	}
8822 netif_carrier_off(tp->dev);
	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;
8828 tg3_full_lock(tp, 0);
8830 tg3_disable_ints(tp);
8831 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8833 tg3_full_unlock(tp);
	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);
8841 /* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;
8848 tg3_napi_enable(tp);
8850 for (i = 0; i < tp->irq_cnt; i++) {
8851 struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--)
				free_irq(tnapi->irq_vec, tnapi);
			goto err_out2;
		}
	}
8863 tg3_full_lock(tp, 0);
	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;
8875 BUG_ON(tp->timer_offset > HZ);
8876 tp->timer_counter = tp->timer_multiplier =
8877 (HZ / tp->timer_offset);
8878 tp->asf_counter = tp->asf_multiplier =
8879 ((HZ / tp->timer_offset) * 2);
8881 init_timer(&tp->timer);
8882 tp->timer.expires = jiffies + tp->timer_offset;
8883 tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}
	tg3_full_unlock(tp);

	if (err)
		goto err_out3;
8892 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}
	}
8904 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8905 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8906 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
8907 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
8908 u32 val = tr32(PCIE_TRANSACTION_CFG);
8910 tw32(PCIE_TRANSACTION_CFG,
		     val | PCIE_TRANS_CFG_1SHOT_MSI);
	}
8917 tg3_full_lock(tp, 0);
8919 add_timer(&tp->timer);
8920 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8921 tg3_enable_ints(tp);
8923 tg3_full_unlock(tp);
	netif_tx_start_all_queues(dev);

	return 0;
err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
8931 struct tg3_napi *tnapi = &tp->napi[i];
8932 free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	struct tg3_hw_status *sblk = tp->napi[0].hw_status;
8952 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8953 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8954 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8958 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8959 tr32(MAC_MODE), tr32(MAC_STATUS));
8960 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8961 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8962 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8963 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8964 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8965 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8967 /* Send data initiator control block */
8968 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8969 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8970 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8971 tr32(SNDDATAI_STATSCTRL));
8973 /* Send data completion control block */
8974 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8976 /* Send BD ring selector block */
8977 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8978 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8980 /* Send BD initiator control block */
8981 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8982 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8984 /* Send BD completion control block */
8985 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8987 /* Receive list placement control block */
8988 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8989 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8990 printk(" RCVLPC_STATSCTRL[%08x]\n",
8991 tr32(RCVLPC_STATSCTRL));
8993 /* Receive data and receive BD initiator control block */
8994 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8995 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8997 /* Receive data completion control block */
8998 printk("DEBUG: RCVDCC_MODE[%08x]\n",
9001 /* Receive BD initiator control block */
9002 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
9003 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
9005 /* Receive BD completion control block */
9006 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
9007 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
9009 /* Receive list selector control block */
9010 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
9011 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
9013 /* Mbuf cluster free block */
9014 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
9015 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
9017 /* Host coalescing control block */
9018 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
9019 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
9020 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
9021 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
9022 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
9023 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
9024 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
9025 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
9026 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
9027 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
9028 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
9029 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
9031 /* Memory arbiter control block */
9032 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
9033 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
9035 /* Buffer manager control block */
9036 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
9037 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
9038 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
9039 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
9040 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
9041 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
9042 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
9043 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
9045 /* Read DMA control block */
9046 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
9047 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
9049 /* Write DMA control block */
9050 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
9051 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
9053 /* DMA completion block */
9054 printk("DEBUG: DMAC_MODE[%08x]\n",
9058 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
9059 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
9060 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
9061 tr32(GRC_LOCAL_CTRL));
9064 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
9065 tr32(RCVDBDI_JUMBO_BD + 0x0),
9066 tr32(RCVDBDI_JUMBO_BD + 0x4),
9067 tr32(RCVDBDI_JUMBO_BD + 0x8),
9068 tr32(RCVDBDI_JUMBO_BD + 0xc));
9069 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
9070 tr32(RCVDBDI_STD_BD + 0x0),
9071 tr32(RCVDBDI_STD_BD + 0x4),
9072 tr32(RCVDBDI_STD_BD + 0x8),
9073 tr32(RCVDBDI_STD_BD + 0xc));
9074 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
9075 tr32(RCVDBDI_MINI_BD + 0x0),
9076 tr32(RCVDBDI_MINI_BD + 0x4),
9077 tr32(RCVDBDI_MINI_BD + 0x8),
9078 tr32(RCVDBDI_MINI_BD + 0xc));
9080 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
9081 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
9082 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
9083 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
9084 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
9085 val32, val32_2, val32_3, val32_4);
9087 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
9088 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
9089 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
9090 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
9091 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
9092 val32, val32_2, val32_3, val32_4);
9094 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
9095 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
9096 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
9097 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
9098 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
9099 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
9100 val32, val32_2, val32_3, val32_4, val32_5);
9102 /* SW status block */
9104 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
9107 sblk->rx_jumbo_consumer,
9109 sblk->rx_mini_consumer,
9110 sblk->idx[0].rx_producer,
9111 sblk->idx[0].tx_consumer);
9113 /* SW statistics block */
9114 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
9115 ((u32 *)tp->hw_stats)[0],
9116 ((u32 *)tp->hw_stats)[1],
9117 ((u32 *)tp->hw_stats)[2],
9118 ((u32 *)tp->hw_stats)[3]);
9121 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
9122 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
9123 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
9124 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
9125 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
9127 /* NIC side send descriptors. */
9128 for (i = 0; i < 6; i++) {
9131 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
9132 + (i * sizeof(struct tg3_tx_buffer_desc));
9133 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
9135 readl(txd + 0x0), readl(txd + 0x4),
9136 readl(txd + 0x8), readl(txd + 0xc));
9139 /* NIC side RX descriptors. */
9140 for (i = 0; i < 6; i++) {
9143 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
9144 + (i * sizeof(struct tg3_rx_buffer_desc));
9145 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
9147 readl(rxd + 0x0), readl(rxd + 0x4),
9148 readl(rxd + 0x8), readl(rxd + 0xc));
9149 rxd += (4 * sizeof(u32));
9150 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
9152 readl(rxd + 0x0), readl(rxd + 0x4),
9153 readl(rxd + 0x8), readl(rxd + 0xc));
9156 for (i = 0; i < 6; i++) {
9159 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
9160 + (i * sizeof(struct tg3_rx_buffer_desc));
9161 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
9163 readl(rxd + 0x0), readl(rxd + 0x4),
9164 readl(rxd + 0x8), readl(rxd + 0xc));
9165 rxd += (4 * sizeof(u32));
9166 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
9168 readl(rxd + 0x0), readl(rxd + 0x4),
9169 readl(rxd + 0x8), readl(rxd + 0xc));
9174 static struct net_device_stats *tg3_get_stats(struct net_device *);
9175 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9177 static int tg3_close(struct net_device *dev)
9180 struct tg3 *tp = netdev_priv(dev);
9182 tg3_napi_disable(tp);
9183 cancel_work_sync(&tp->reset_task);
9185 netif_tx_stop_all_queues(dev);
9187 del_timer_sync(&tp->timer);
9191 tg3_full_lock(tp, 1);
9196 tg3_disable_ints(tp);
9198 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9200 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9202 tg3_full_unlock(tp);
9204 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9205 struct tg3_napi *tnapi = &tp->napi[i];
9206 free_irq(tnapi->irq_vec, tnapi);
9211 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
9212 sizeof(tp->net_stats_prev));
9213 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9214 sizeof(tp->estats_prev));
9216 tg3_free_consistent(tp);
9218 tg3_set_power_state(tp, PCI_D3hot);
9220 netif_carrier_off(tp->dev);
9225 static inline unsigned long get_stat64(tg3_stat64_t *val)
9229 #if (BITS_PER_LONG == 32)
9232 ret = ((u64)val->high << 32) | ((u64)val->low);
9237 static inline u64 get_estat64(tg3_stat64_t *val)
9239 return ((u64)val->high << 32) | ((u64)val->low);
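/* Hardware keeps each counter as two 32-bit halves.  get_estat64()
 * always rebuilds the full 64-bit value, while get_stat64() returns an
 * unsigned long, so on 32-bit hosts (the elided BITS_PER_LONG == 32
 * branches) the value is effectively truncated to the low 32 bits.
 */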
9242 static unsigned long calc_crc_errors(struct tg3 *tp)
9244 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9246 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9247 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9248 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9251 spin_lock_bh(&tp->lock);
9252 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9253 tg3_writephy(tp, MII_TG3_TEST1,
9254 val | MII_TG3_TEST1_CRC_EN);
9255 tg3_readphy(tp, 0x14, &val);
9258 spin_unlock_bh(&tp->lock);
9260 tp->phy_crc_errors += val;
9262 return tp->phy_crc_errors;
9265 return get_stat64(&hw_stats->rx_fcs_errors);
9268 #define ESTAT_ADD(member) \
9269 estats->member = old_estats->member + \
9270 get_estat64(&hw_stats->member)
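/* The chip's statistics block is zeroed by every reset, so each counter
 * is reported as the live hardware value added on top of the snapshot
 * taken in tg3_close() (estats_prev / net_stats_prev).
 */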
9272 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9274 struct tg3_ethtool_stats *estats = &tp->estats;
9275 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9276 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9281 ESTAT_ADD(rx_octets);
9282 ESTAT_ADD(rx_fragments);
9283 ESTAT_ADD(rx_ucast_packets);
9284 ESTAT_ADD(rx_mcast_packets);
9285 ESTAT_ADD(rx_bcast_packets);
9286 ESTAT_ADD(rx_fcs_errors);
9287 ESTAT_ADD(rx_align_errors);
9288 ESTAT_ADD(rx_xon_pause_rcvd);
9289 ESTAT_ADD(rx_xoff_pause_rcvd);
9290 ESTAT_ADD(rx_mac_ctrl_rcvd);
9291 ESTAT_ADD(rx_xoff_entered);
9292 ESTAT_ADD(rx_frame_too_long_errors);
9293 ESTAT_ADD(rx_jabbers);
9294 ESTAT_ADD(rx_undersize_packets);
9295 ESTAT_ADD(rx_in_length_errors);
9296 ESTAT_ADD(rx_out_length_errors);
9297 ESTAT_ADD(rx_64_or_less_octet_packets);
9298 ESTAT_ADD(rx_65_to_127_octet_packets);
9299 ESTAT_ADD(rx_128_to_255_octet_packets);
9300 ESTAT_ADD(rx_256_to_511_octet_packets);
9301 ESTAT_ADD(rx_512_to_1023_octet_packets);
9302 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9303 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9304 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9305 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9306 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9308 ESTAT_ADD(tx_octets);
9309 ESTAT_ADD(tx_collisions);
9310 ESTAT_ADD(tx_xon_sent);
9311 ESTAT_ADD(tx_xoff_sent);
9312 ESTAT_ADD(tx_flow_control);
9313 ESTAT_ADD(tx_mac_errors);
9314 ESTAT_ADD(tx_single_collisions);
9315 ESTAT_ADD(tx_mult_collisions);
9316 ESTAT_ADD(tx_deferred);
9317 ESTAT_ADD(tx_excessive_collisions);
9318 ESTAT_ADD(tx_late_collisions);
9319 ESTAT_ADD(tx_collide_2times);
9320 ESTAT_ADD(tx_collide_3times);
9321 ESTAT_ADD(tx_collide_4times);
9322 ESTAT_ADD(tx_collide_5times);
9323 ESTAT_ADD(tx_collide_6times);
9324 ESTAT_ADD(tx_collide_7times);
9325 ESTAT_ADD(tx_collide_8times);
9326 ESTAT_ADD(tx_collide_9times);
9327 ESTAT_ADD(tx_collide_10times);
9328 ESTAT_ADD(tx_collide_11times);
9329 ESTAT_ADD(tx_collide_12times);
9330 ESTAT_ADD(tx_collide_13times);
9331 ESTAT_ADD(tx_collide_14times);
9332 ESTAT_ADD(tx_collide_15times);
9333 ESTAT_ADD(tx_ucast_packets);
9334 ESTAT_ADD(tx_mcast_packets);
9335 ESTAT_ADD(tx_bcast_packets);
9336 ESTAT_ADD(tx_carrier_sense_errors);
9337 ESTAT_ADD(tx_discards);
9338 ESTAT_ADD(tx_errors);
9340 ESTAT_ADD(dma_writeq_full);
9341 ESTAT_ADD(dma_write_prioq_full);
9342 ESTAT_ADD(rxbds_empty);
9343 ESTAT_ADD(rx_discards);
9344 ESTAT_ADD(rx_errors);
9345 ESTAT_ADD(rx_threshold_hit);
9347 ESTAT_ADD(dma_readq_full);
9348 ESTAT_ADD(dma_read_prioq_full);
9349 ESTAT_ADD(tx_comp_queue_full);
9351 ESTAT_ADD(ring_set_send_prod_index);
9352 ESTAT_ADD(ring_status_update);
9353 ESTAT_ADD(nic_irqs);
9354 ESTAT_ADD(nic_avoided_irqs);
9355 ESTAT_ADD(nic_tx_threshold_hit);
9360 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
9362 struct tg3 *tp = netdev_priv(dev);
9363 struct net_device_stats *stats = &tp->net_stats;
9364 struct net_device_stats *old_stats = &tp->net_stats_prev;
9365 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9370 stats->rx_packets = old_stats->rx_packets +
9371 get_stat64(&hw_stats->rx_ucast_packets) +
9372 get_stat64(&hw_stats->rx_mcast_packets) +
9373 get_stat64(&hw_stats->rx_bcast_packets);
9375 stats->tx_packets = old_stats->tx_packets +
9376 get_stat64(&hw_stats->tx_ucast_packets) +
9377 get_stat64(&hw_stats->tx_mcast_packets) +
9378 get_stat64(&hw_stats->tx_bcast_packets);
9380 stats->rx_bytes = old_stats->rx_bytes +
9381 get_stat64(&hw_stats->rx_octets);
9382 stats->tx_bytes = old_stats->tx_bytes +
9383 get_stat64(&hw_stats->tx_octets);
9385 stats->rx_errors = old_stats->rx_errors +
9386 get_stat64(&hw_stats->rx_errors);
9387 stats->tx_errors = old_stats->tx_errors +
9388 get_stat64(&hw_stats->tx_errors) +
9389 get_stat64(&hw_stats->tx_mac_errors) +
9390 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9391 get_stat64(&hw_stats->tx_discards);
9393 stats->multicast = old_stats->multicast +
9394 get_stat64(&hw_stats->rx_mcast_packets);
9395 stats->collisions = old_stats->collisions +
9396 get_stat64(&hw_stats->tx_collisions);
9398 stats->rx_length_errors = old_stats->rx_length_errors +
9399 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9400 get_stat64(&hw_stats->rx_undersize_packets);
9402 stats->rx_over_errors = old_stats->rx_over_errors +
9403 get_stat64(&hw_stats->rxbds_empty);
9404 stats->rx_frame_errors = old_stats->rx_frame_errors +
9405 get_stat64(&hw_stats->rx_align_errors);
9406 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9407 get_stat64(&hw_stats->tx_discards);
9408 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9409 get_stat64(&hw_stats->tx_carrier_sense_errors);
9411 stats->rx_crc_errors = old_stats->rx_crc_errors +
9412 calc_crc_errors(tp);
9414 stats->rx_missed_errors = old_stats->rx_missed_errors +
9415 get_stat64(&hw_stats->rx_discards);
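/* Bit-serial CRC-32 using the reflected Ethernet FCS polynomial
 * 0xedb88320 (body elided here).  The multicast filter below hashes
 * each address with this CRC and derives a 7-bit index from the result
 * to pick one of the 128 hash-table bits.
 */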
9420 static inline u32 calc_crc(unsigned char *buf, int len)
9428 for (j = 0; j < len; j++) {
9431 for (k = 0; k < 8; k++) {
9445 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9447 /* accept or reject all multicast frames */
9448 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9449 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9450 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9451 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9454 static void __tg3_set_rx_mode(struct net_device *dev)
9456 struct tg3 *tp = netdev_priv(dev);
9459 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9460 RX_MODE_KEEP_VLAN_TAG);
9462 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9465 #if TG3_VLAN_TAG_USED
9467 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9468 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9470 /* By definition, VLAN is disabled always in this
9473 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9474 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9477 if (dev->flags & IFF_PROMISC) {
9478 /* Promiscuous mode. */
9479 rx_mode |= RX_MODE_PROMISC;
9480 } else if (dev->flags & IFF_ALLMULTI) {
9481 /* Accept all multicast. */
9482 tg3_set_multi (tp, 1);
9483 } else if (netdev_mc_empty(dev)) {
9484 /* Reject all multicast. */
9485 tg3_set_multi (tp, 0);
9487 /* Accept one or more multicast(s). */
9488 struct dev_mc_list *mclist;
9490 u32 mc_filter[4] = { 0, };
9495 for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
9496 i++, mclist = mclist->next) {
9498 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
9500 regidx = (bit & 0x60) >> 5;
9502 mc_filter[regidx] |= (1 << bit);
9505 tw32(MAC_HASH_REG_0, mc_filter[0]);
9506 tw32(MAC_HASH_REG_1, mc_filter[1]);
9507 tw32(MAC_HASH_REG_2, mc_filter[2]);
9508 tw32(MAC_HASH_REG_3, mc_filter[3]);
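/* Each multicast address hashes to one of 128 filter bits spread across
 * the four MAC_HASH_REG_* registers: bits 6:5 of the 7-bit index select
 * the register and the low five bits (masking elided above) select the
 * bit within it.  The MAC accepts a frame when its hash bit is set.
 */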
9511 if (rx_mode != tp->rx_mode) {
9512 tp->rx_mode = rx_mode;
9513 tw32_f(MAC_RX_MODE, rx_mode);
9518 static void tg3_set_rx_mode(struct net_device *dev)
9520 struct tg3 *tp = netdev_priv(dev);
9522 if (!netif_running(dev))
9525 tg3_full_lock(tp, 0);
9526 __tg3_set_rx_mode(dev);
9527 tg3_full_unlock(tp);
9530 #define TG3_REGDUMP_LEN (32 * 1024)
9532 static int tg3_get_regs_len(struct net_device *dev)
9534 return TG3_REGDUMP_LEN;
9537 static void tg3_get_regs(struct net_device *dev,
9538 struct ethtool_regs *regs, void *_p)
9541 struct tg3 *tp = netdev_priv(dev);
9547 memset(p, 0, TG3_REGDUMP_LEN);
9549 if (tp->link_config.phy_is_low_power)
9552 tg3_full_lock(tp, 0);
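/* Each register value is stored at its own offset within the 32K dump
 * buffer, so the blob mirrors the chip's register map; ranges that are
 * not read below remain zero from the memset above.
 */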
9554 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9555 #define GET_REG32_LOOP(base,len) \
9556 do { p = (u32 *)(orig_p + (base)); \
9557 for (i = 0; i < len; i += 4) \
9558 __GET_REG32((base) + i); \
9560 #define GET_REG32_1(reg) \
9561 do { p = (u32 *)(orig_p + (reg)); \
9562 __GET_REG32((reg)); \
9565 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9566 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9567 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9568 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9569 GET_REG32_1(SNDDATAC_MODE);
9570 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9571 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9572 GET_REG32_1(SNDBDC_MODE);
9573 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9574 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9575 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9576 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9577 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9578 GET_REG32_1(RCVDCC_MODE);
9579 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9580 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9581 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9582 GET_REG32_1(MBFREE_MODE);
9583 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9584 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9585 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9586 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9587 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9588 GET_REG32_1(RX_CPU_MODE);
9589 GET_REG32_1(RX_CPU_STATE);
9590 GET_REG32_1(RX_CPU_PGMCTR);
9591 GET_REG32_1(RX_CPU_HWBKPT);
9592 GET_REG32_1(TX_CPU_MODE);
9593 GET_REG32_1(TX_CPU_STATE);
9594 GET_REG32_1(TX_CPU_PGMCTR);
9595 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9596 GET_REG32_LOOP(FTQ_RESET, 0x120);
9597 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9598 GET_REG32_1(DMAC_MODE);
9599 GET_REG32_LOOP(GRC_MODE, 0x4c);
9600 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9601 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9604 #undef GET_REG32_LOOP
9607 tg3_full_unlock(tp);
9610 static int tg3_get_eeprom_len(struct net_device *dev)
9612 struct tg3 *tp = netdev_priv(dev);
9614 return tp->nvram_size;
9617 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9619 struct tg3 *tp = netdev_priv(dev);
9622 u32 i, offset, len, b_offset, b_count;
9625 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9628 if (tp->link_config.phy_is_low_power)
9631 offset = eeprom->offset;
9635 eeprom->magic = TG3_EEPROM_MAGIC;
9638 /* adjustments to start on required 4 byte boundary */
9639 b_offset = offset & 3;
9640 b_count = 4 - b_offset;
9641 if (b_count > len) {
9642 /* i.e. offset=1 len=2 */
9645 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9648 memcpy(data, ((char*)&val) + b_offset, b_count);
9651 eeprom->len += b_count;
9654 /* read bytes up to the last 4 byte boundary */
9655 pd = &data[eeprom->len];
9656 for (i = 0; i < (len - (len & 3)); i += 4) {
9657 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9662 memcpy(pd + i, &val, 4);
9667 /* read last bytes not ending on 4 byte boundary */
9668 pd = &data[eeprom->len];
9670 b_offset = offset + len - b_count;
9671 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9674 memcpy(pd, &val, b_count);
9675 eeprom->len += b_count;
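/* Example: offset=5, len=10 reads the word at 4 and keeps its last 3
 * bytes, copies the aligned word at 8 whole, then reads the word at 12
 * for the final 3 bytes.
 */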
9680 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9682 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9684 struct tg3 *tp = netdev_priv(dev);
9686 u32 offset, len, b_offset, odd_len;
9690 if (tp->link_config.phy_is_low_power)
9693 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9694 eeprom->magic != TG3_EEPROM_MAGIC)
9697 offset = eeprom->offset;
9700 if ((b_offset = (offset & 3))) {
9701 /* adjustments to start on required 4 byte boundary */
9702 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9713 /* adjustments to end on required 4 byte boundary */
9715 len = (len + 3) & ~3;
9716 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9722 if (b_offset || odd_len) {
9723 buf = kmalloc(len, GFP_KERNEL);
9727 memcpy(buf, &start, 4);
9729 memcpy(buf+len-4, &end, 4);
9730 memcpy(buf + b_offset, data, eeprom->len);
9733 ret = tg3_nvram_write_block(tp, offset, len, buf);
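/* Unaligned writes are widened to whole words: the bordering words were
 * read above ("start"/"end") and merged around the caller's data, so
 * e.g. offset=2, len=3 becomes one 8-byte write covering bytes 0-7 with
 * only bytes 2-4 replaced.
 */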
9741 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9743 struct tg3 *tp = netdev_priv(dev);
9745 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9746 struct phy_device *phydev;
9747 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9749 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9750 return phy_ethtool_gset(phydev, cmd);
9753 cmd->supported = (SUPPORTED_Autoneg);
9755 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9756 cmd->supported |= (SUPPORTED_1000baseT_Half |
9757 SUPPORTED_1000baseT_Full);
9759 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9760 cmd->supported |= (SUPPORTED_100baseT_Half |
9761 SUPPORTED_100baseT_Full |
9762 SUPPORTED_10baseT_Half |
9763 SUPPORTED_10baseT_Full |
9765 cmd->port = PORT_TP;
9767 cmd->supported |= SUPPORTED_FIBRE;
9768 cmd->port = PORT_FIBRE;
9771 cmd->advertising = tp->link_config.advertising;
9772 if (netif_running(dev)) {
9773 cmd->speed = tp->link_config.active_speed;
9774 cmd->duplex = tp->link_config.active_duplex;
9776 cmd->phy_address = tp->phy_addr;
9777 cmd->transceiver = XCVR_INTERNAL;
9778 cmd->autoneg = tp->link_config.autoneg;
9784 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9786 struct tg3 *tp = netdev_priv(dev);
9788 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9789 struct phy_device *phydev;
9790 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9792 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9793 return phy_ethtool_sset(phydev, cmd);
9796 if (cmd->autoneg != AUTONEG_ENABLE &&
9797 cmd->autoneg != AUTONEG_DISABLE)
9800 if (cmd->autoneg == AUTONEG_DISABLE &&
9801 cmd->duplex != DUPLEX_FULL &&
9802 cmd->duplex != DUPLEX_HALF)
9805 if (cmd->autoneg == AUTONEG_ENABLE) {
9806 u32 mask = ADVERTISED_Autoneg |
9808 ADVERTISED_Asym_Pause;
9810 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9811 mask |= ADVERTISED_1000baseT_Half |
9812 ADVERTISED_1000baseT_Full;
9814 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9815 mask |= ADVERTISED_100baseT_Half |
9816 ADVERTISED_100baseT_Full |
9817 ADVERTISED_10baseT_Half |
9818 ADVERTISED_10baseT_Full |
9821 mask |= ADVERTISED_FIBRE;
9823 if (cmd->advertising & ~mask)
9826 mask &= (ADVERTISED_1000baseT_Half |
9827 ADVERTISED_1000baseT_Full |
9828 ADVERTISED_100baseT_Half |
9829 ADVERTISED_100baseT_Full |
9830 ADVERTISED_10baseT_Half |
9831 ADVERTISED_10baseT_Full);
9833 cmd->advertising &= mask;
9835 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9836 if (cmd->speed != SPEED_1000)
9839 if (cmd->duplex != DUPLEX_FULL)
9842 if (cmd->speed != SPEED_100 &&
9843 cmd->speed != SPEED_10)
9848 tg3_full_lock(tp, 0);
9850 tp->link_config.autoneg = cmd->autoneg;
9851 if (cmd->autoneg == AUTONEG_ENABLE) {
9852 tp->link_config.advertising = (cmd->advertising |
9853 ADVERTISED_Autoneg);
9854 tp->link_config.speed = SPEED_INVALID;
9855 tp->link_config.duplex = DUPLEX_INVALID;
9857 tp->link_config.advertising = 0;
9858 tp->link_config.speed = cmd->speed;
9859 tp->link_config.duplex = cmd->duplex;
9862 tp->link_config.orig_speed = tp->link_config.speed;
9863 tp->link_config.orig_duplex = tp->link_config.duplex;
9864 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9866 if (netif_running(dev))
9867 tg3_setup_phy(tp, 1);
9869 tg3_full_unlock(tp);
9874 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9876 struct tg3 *tp = netdev_priv(dev);
9878 strcpy(info->driver, DRV_MODULE_NAME);
9879 strcpy(info->version, DRV_MODULE_VERSION);
9880 strcpy(info->fw_version, tp->fw_ver);
9881 strcpy(info->bus_info, pci_name(tp->pdev));
9884 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9886 struct tg3 *tp = netdev_priv(dev);
9888 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9889 device_can_wakeup(&tp->pdev->dev))
9890 wol->supported = WAKE_MAGIC;
9894 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9895 device_can_wakeup(&tp->pdev->dev))
9896 wol->wolopts = WAKE_MAGIC;
9897 memset(&wol->sopass, 0, sizeof(wol->sopass));
9900 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9902 struct tg3 *tp = netdev_priv(dev);
9903 struct device *dp = &tp->pdev->dev;
9905 if (wol->wolopts & ~WAKE_MAGIC)
9907 if ((wol->wolopts & WAKE_MAGIC) &&
9908 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9911 spin_lock_bh(&tp->lock);
9912 if (wol->wolopts & WAKE_MAGIC) {
9913 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9914 device_set_wakeup_enable(dp, true);
9916 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9917 device_set_wakeup_enable(dp, false);
9919 spin_unlock_bh(&tp->lock);
9924 static u32 tg3_get_msglevel(struct net_device *dev)
9926 struct tg3 *tp = netdev_priv(dev);
9927 return tp->msg_enable;
9930 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9932 struct tg3 *tp = netdev_priv(dev);
9933 tp->msg_enable = value;
9936 static int tg3_set_tso(struct net_device *dev, u32 value)
9938 struct tg3 *tp = netdev_priv(dev);
9940 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9945 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9946 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9947 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9949 dev->features |= NETIF_F_TSO6;
9950 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9952 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9953 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9956 dev->features |= NETIF_F_TSO_ECN;
9958 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9960 return ethtool_op_set_tso(dev, value);
9963 static int tg3_nway_reset(struct net_device *dev)
9965 struct tg3 *tp = netdev_priv(dev);
9968 if (!netif_running(dev))
9971 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9974 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9975 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9977 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9981 spin_lock_bh(&tp->lock);
9983 tg3_readphy(tp, MII_BMCR, &bmcr);
9984 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9985 ((bmcr & BMCR_ANENABLE) ||
9986 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9987 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9991 spin_unlock_bh(&tp->lock);
9997 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9999 struct tg3 *tp = netdev_priv(dev);
10001 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
10002 ering->rx_mini_max_pending = 0;
10003 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10004 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
10006 ering->rx_jumbo_max_pending = 0;
10008 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10010 ering->rx_pending = tp->rx_pending;
10011 ering->rx_mini_pending = 0;
10012 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10013 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10015 ering->rx_jumbo_pending = 0;
10017 ering->tx_pending = tp->napi[0].tx_pending;
10020 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10022 struct tg3 *tp = netdev_priv(dev);
10023 int i, irq_sync = 0, err = 0;
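/* The checks below cap each ring at its hardware size and require
 * tx_pending to exceed MAX_SKB_FRAGS (3x that on TSO-bug chips, which
 * may segment a packet in the driver), so a maximally fragmented skb
 * always fits in the TX ring.
 */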
10025 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
10026 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
10027 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10028 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10029 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
10030 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10033 if (netif_running(dev)) {
10035 tg3_netif_stop(tp);
10039 tg3_full_lock(tp, irq_sync);
10041 tp->rx_pending = ering->rx_pending;
10043 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
10044 tp->rx_pending > 63)
10045 tp->rx_pending = 63;
10046 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10048 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
10049 tp->napi[i].tx_pending = ering->tx_pending;
10051 if (netif_running(dev)) {
10052 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10053 err = tg3_restart_hw(tp, 1);
10055 tg3_netif_start(tp);
10058 tg3_full_unlock(tp);
10060 if (irq_sync && !err)
10066 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10068 struct tg3 *tp = netdev_priv(dev);
10070 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
10072 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10073 epause->rx_pause = 1;
10075 epause->rx_pause = 0;
10077 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10078 epause->tx_pause = 1;
10080 epause->tx_pause = 0;
10083 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10085 struct tg3 *tp = netdev_priv(dev);
10088 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10089 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10092 if (epause->autoneg) {
10094 struct phy_device *phydev;
10096 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
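/* Map the rx/tx pause request onto 802.3 pause advertisement bits:
 * rx+tx -> Pause, rx only -> Pause|Asym_Pause, tx only -> Asym_Pause,
 * neither -> 0 (final else elided below).
 */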
10098 if (epause->rx_pause) {
10099 if (epause->tx_pause)
10100 newadv = ADVERTISED_Pause;
10102 newadv = ADVERTISED_Pause |
10103 ADVERTISED_Asym_Pause;
10104 } else if (epause->tx_pause) {
10105 newadv = ADVERTISED_Asym_Pause;
10109 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
10110 u32 oldadv = phydev->advertising &
10111 (ADVERTISED_Pause |
10112 ADVERTISED_Asym_Pause);
10113 if (oldadv != newadv) {
10114 phydev->advertising &=
10115 ~(ADVERTISED_Pause |
10116 ADVERTISED_Asym_Pause);
10117 phydev->advertising |= newadv;
10118 err = phy_start_aneg(phydev);
10121 tp->link_config.advertising &=
10122 ~(ADVERTISED_Pause |
10123 ADVERTISED_Asym_Pause);
10124 tp->link_config.advertising |= newadv;
10127 if (epause->rx_pause)
10128 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10130 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10132 if (epause->tx_pause)
10133 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10135 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10137 if (netif_running(dev))
10138 tg3_setup_flow_control(tp, 0, 0);
10143 if (netif_running(dev)) {
10144 tg3_netif_stop(tp);
10148 tg3_full_lock(tp, irq_sync);
10150 if (epause->autoneg)
10151 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10153 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10154 if (epause->rx_pause)
10155 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10157 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10158 if (epause->tx_pause)
10159 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10161 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10163 if (netif_running(dev)) {
10164 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10165 err = tg3_restart_hw(tp, 1);
10167 tg3_netif_start(tp);
10170 tg3_full_unlock(tp);
10176 static u32 tg3_get_rx_csum(struct net_device *dev)
10178 struct tg3 *tp = netdev_priv(dev);
10179 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10182 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10184 struct tg3 *tp = netdev_priv(dev);
10186 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10192 spin_lock_bh(&tp->lock);
10194 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10196 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10197 spin_unlock_bh(&tp->lock);
10202 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10204 struct tg3 *tp = netdev_priv(dev);
10206 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10212 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10213 ethtool_op_set_tx_ipv6_csum(dev, data);
10215 ethtool_op_set_tx_csum(dev, data);
10220 static int tg3_get_sset_count (struct net_device *dev, int sset)
10224 return TG3_NUM_TEST;
10226 return TG3_NUM_STATS;
10228 return -EOPNOTSUPP;
10232 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
10234 switch (stringset) {
10236 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
10239 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
10242 WARN_ON(1); /* we need a WARN() */
10247 static int tg3_phys_id(struct net_device *dev, u32 data)
10249 struct tg3 *tp = netdev_priv(dev);
10252 if (!netif_running(tp->dev))
10256 data = UINT_MAX / 2;
10258 for (i = 0; i < (data * 2); i++) {
10260 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10261 LED_CTRL_1000MBPS_ON |
10262 LED_CTRL_100MBPS_ON |
10263 LED_CTRL_10MBPS_ON |
10264 LED_CTRL_TRAFFIC_OVERRIDE |
10265 LED_CTRL_TRAFFIC_BLINK |
10266 LED_CTRL_TRAFFIC_LED);
10269 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10270 LED_CTRL_TRAFFIC_OVERRIDE);
10272 if (msleep_interruptible(500))
10275 tw32(MAC_LED_CTRL, tp->led_ctrl);
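/* Each iteration alternates between forcing all speed/traffic LEDs on
 * and overriding them off, sleeping 500 ms in between, so data * 2
 * iterations blink the port LED for roughly `data` seconds before the
 * saved led_ctrl value is restored above.
 */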
10279 static void tg3_get_ethtool_stats (struct net_device *dev,
10280 struct ethtool_stats *estats, u64 *tmp_stats)
10282 struct tg3 *tp = netdev_priv(dev);
10283 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10286 #define NVRAM_TEST_SIZE 0x100
10287 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10288 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10289 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10290 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10291 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10293 static int tg3_test_nvram(struct tg3 *tp)
10297 int i, j, k, err = 0, size;
10299 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10302 if (tg3_nvram_read(tp, 0, &magic) != 0)
10305 if (magic == TG3_EEPROM_MAGIC)
10306 size = NVRAM_TEST_SIZE;
10307 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10308 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10309 TG3_EEPROM_SB_FORMAT_1) {
10310 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10311 case TG3_EEPROM_SB_REVISION_0:
10312 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10314 case TG3_EEPROM_SB_REVISION_2:
10315 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10317 case TG3_EEPROM_SB_REVISION_3:
10318 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10325 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10326 size = NVRAM_SELFBOOT_HW_SIZE;
10330 buf = kmalloc(size, GFP_KERNEL);
10335 for (i = 0, j = 0; i < size; i += 4, j++) {
10336 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10343 /* Selfboot format */
10344 magic = be32_to_cpu(buf[0]);
10345 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10346 TG3_EEPROM_MAGIC_FW) {
10347 u8 *buf8 = (u8 *) buf, csum8 = 0;
10349 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10350 TG3_EEPROM_SB_REVISION_2) {
10351 /* For rev 2, the csum doesn't include the MBA. */
10352 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10354 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10357 for (i = 0; i < size; i++)
10370 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10371 TG3_EEPROM_MAGIC_HW) {
10372 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10373 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10374 u8 *buf8 = (u8 *) buf;
10376 /* Separate the parity bits and the data bytes. */
10377 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10378 if ((i == 0) || (i == 8)) {
10382 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10383 parity[k++] = buf8[i] & msk;
10386 else if (i == 16) {
10390 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10391 parity[k++] = buf8[i] & msk;
10394 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10395 parity[k++] = buf8[i] & msk;
10398 data[j++] = buf8[i];
10402 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10403 u8 hw8 = hweight8(data[i]);
10405 if ((hw8 & 0x1) && parity[i])
10407 else if (!(hw8 & 0x1) && !parity[i])
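/* i.e. the stored parity bit must give each byte odd parity: a data
 * byte with an odd number of ones must carry a 0 parity bit and an
 * even one must carry a 1; either mismatch fails the test (error
 * paths elided).
 */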
10414 /* Bootstrap checksum at offset 0x10 */
10415 csum = calc_crc((unsigned char *) buf, 0x10);
10416 if (csum != be32_to_cpu(buf[0x10/4]))
10419 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10420 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10421 if (csum != be32_to_cpu(buf[0xfc/4]))
10431 #define TG3_SERDES_TIMEOUT_SEC 2
10432 #define TG3_COPPER_TIMEOUT_SEC 6
10434 static int tg3_test_link(struct tg3 *tp)
10438 if (!netif_running(tp->dev))
10441 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10442 max = TG3_SERDES_TIMEOUT_SEC;
10444 max = TG3_COPPER_TIMEOUT_SEC;
10446 for (i = 0; i < max; i++) {
10447 if (netif_carrier_ok(tp->dev))
10450 if (msleep_interruptible(1000))
10457 /* Only test the commonly used registers */
10458 static int tg3_test_registers(struct tg3 *tp)
10460 int i, is_5705, is_5750;
10461 u32 offset, read_mask, write_mask, val, save_val, read_val;
10465 #define TG3_FL_5705 0x1
10466 #define TG3_FL_NOT_5705 0x2
10467 #define TG3_FL_NOT_5788 0x4
10468 #define TG3_FL_NOT_5750 0x8
10472 /* MAC Control Registers */
10473 { MAC_MODE, TG3_FL_NOT_5705,
10474 0x00000000, 0x00ef6f8c },
10475 { MAC_MODE, TG3_FL_5705,
10476 0x00000000, 0x01ef6b8c },
10477 { MAC_STATUS, TG3_FL_NOT_5705,
10478 0x03800107, 0x00000000 },
10479 { MAC_STATUS, TG3_FL_5705,
10480 0x03800100, 0x00000000 },
10481 { MAC_ADDR_0_HIGH, 0x0000,
10482 0x00000000, 0x0000ffff },
10483 { MAC_ADDR_0_LOW, 0x0000,
10484 0x00000000, 0xffffffff },
10485 { MAC_RX_MTU_SIZE, 0x0000,
10486 0x00000000, 0x0000ffff },
10487 { MAC_TX_MODE, 0x0000,
10488 0x00000000, 0x00000070 },
10489 { MAC_TX_LENGTHS, 0x0000,
10490 0x00000000, 0x00003fff },
10491 { MAC_RX_MODE, TG3_FL_NOT_5705,
10492 0x00000000, 0x000007fc },
10493 { MAC_RX_MODE, TG3_FL_5705,
10494 0x00000000, 0x000007dc },
10495 { MAC_HASH_REG_0, 0x0000,
10496 0x00000000, 0xffffffff },
10497 { MAC_HASH_REG_1, 0x0000,
10498 0x00000000, 0xffffffff },
10499 { MAC_HASH_REG_2, 0x0000,
10500 0x00000000, 0xffffffff },
10501 { MAC_HASH_REG_3, 0x0000,
10502 0x00000000, 0xffffffff },
10504 /* Receive Data and Receive BD Initiator Control Registers. */
10505 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10506 0x00000000, 0xffffffff },
10507 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10508 0x00000000, 0xffffffff },
10509 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10510 0x00000000, 0x00000003 },
10511 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10512 0x00000000, 0xffffffff },
10513 { RCVDBDI_STD_BD+0, 0x0000,
10514 0x00000000, 0xffffffff },
10515 { RCVDBDI_STD_BD+4, 0x0000,
10516 0x00000000, 0xffffffff },
10517 { RCVDBDI_STD_BD+8, 0x0000,
10518 0x00000000, 0xffff0002 },
10519 { RCVDBDI_STD_BD+0xc, 0x0000,
10520 0x00000000, 0xffffffff },
10522 /* Receive BD Initiator Control Registers. */
10523 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10524 0x00000000, 0xffffffff },
10525 { RCVBDI_STD_THRESH, TG3_FL_5705,
10526 0x00000000, 0x000003ff },
10527 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10528 0x00000000, 0xffffffff },
10530 /* Host Coalescing Control Registers. */
10531 { HOSTCC_MODE, TG3_FL_NOT_5705,
10532 0x00000000, 0x00000004 },
10533 { HOSTCC_MODE, TG3_FL_5705,
10534 0x00000000, 0x000000f6 },
10535 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10536 0x00000000, 0xffffffff },
10537 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10538 0x00000000, 0x000003ff },
10539 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10540 0x00000000, 0xffffffff },
10541 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10542 0x00000000, 0x000003ff },
10543 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10544 0x00000000, 0xffffffff },
10545 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10546 0x00000000, 0x000000ff },
10547 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10548 0x00000000, 0xffffffff },
10549 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10550 0x00000000, 0x000000ff },
10551 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10552 0x00000000, 0xffffffff },
10553 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10554 0x00000000, 0xffffffff },
10555 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10556 0x00000000, 0xffffffff },
10557 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10558 0x00000000, 0x000000ff },
10559 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10560 0x00000000, 0xffffffff },
10561 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10562 0x00000000, 0x000000ff },
10563 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10564 0x00000000, 0xffffffff },
10565 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10566 0x00000000, 0xffffffff },
10567 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10568 0x00000000, 0xffffffff },
10569 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10570 0x00000000, 0xffffffff },
10571 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10572 0x00000000, 0xffffffff },
10573 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10574 0xffffffff, 0x00000000 },
10575 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10576 0xffffffff, 0x00000000 },
10578 /* Buffer Manager Control Registers. */
10579 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10580 0x00000000, 0x007fff80 },
10581 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10582 0x00000000, 0x007fffff },
10583 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10584 0x00000000, 0x0000003f },
10585 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10586 0x00000000, 0x000001ff },
10587 { BUFMGR_MB_HIGH_WATER, 0x0000,
10588 0x00000000, 0x000001ff },
10589 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10590 0xffffffff, 0x00000000 },
10591 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10592 0xffffffff, 0x00000000 },
10594 /* Mailbox Registers */
10595 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10596 0x00000000, 0x000001ff },
10597 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10598 0x00000000, 0x000001ff },
10599 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10600 0x00000000, 0x000007ff },
10601 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10602 0x00000000, 0x000001ff },
10604 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10607 is_5705 = is_5750 = 0;
10608 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10610 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10614 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10615 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10618 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10621 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10622 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10625 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10628 offset = (u32) reg_tbl[i].offset;
10629 read_mask = reg_tbl[i].read_mask;
10630 write_mask = reg_tbl[i].write_mask;
10632 /* Save the original register content */
10633 save_val = tr32(offset);
10635 /* Determine the read-only value. */
10636 read_val = save_val & read_mask;
10638 /* Write zero to the register, then make sure the read-only bits
10639 * are not changed and the read/write bits are all zeros.
10643 val = tr32(offset);
10645 /* Test the read-only and read/write bits. */
10646 if (((val & read_mask) != read_val) || (val & write_mask))
10649 /* Write ones to all the bits defined by RdMask and WrMask, then
10650 * make sure the read-only bits are not changed and the
10651 * read/write bits are all ones.
10653 tw32(offset, read_mask | write_mask);
10655 val = tr32(offset);
10657 /* Test the read-only bits. */
10658 if ((val & read_mask) != read_val)
10661 /* Test the read/write bits. */
10662 if ((val & write_mask) != write_mask)
10665 tw32(offset, save_val);
10671 if (netif_msg_hw(tp))
10672 printk(KERN_ERR PFX "Register test failed at offset %x\n",
10674 tw32(offset, save_val);
10678 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10680 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
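/* Each pattern is walked across the whole range before moving on:
 * all-zeroes and all-ones catch stuck bits, while 0xaa55a55a mixes
 * alternating values to catch coupled neighbouring cells.
 */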
10684 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10685 for (j = 0; j < len; j += 4) {
10688 tg3_write_mem(tp, offset + j, test_pattern[i]);
10689 tg3_read_mem(tp, offset + j, &val);
10690 if (val != test_pattern[i])
10697 static int tg3_test_memory(struct tg3 *tp)
10699 static struct mem_entry {
10702 } mem_tbl_570x[] = {
10703 { 0x00000000, 0x00b50},
10704 { 0x00002000, 0x1c000},
10705 { 0xffffffff, 0x00000}
10706 }, mem_tbl_5705[] = {
10707 { 0x00000100, 0x0000c},
10708 { 0x00000200, 0x00008},
10709 { 0x00004000, 0x00800},
10710 { 0x00006000, 0x01000},
10711 { 0x00008000, 0x02000},
10712 { 0x00010000, 0x0e000},
10713 { 0xffffffff, 0x00000}
10714 }, mem_tbl_5755[] = {
10715 { 0x00000200, 0x00008},
10716 { 0x00004000, 0x00800},
10717 { 0x00006000, 0x00800},
10718 { 0x00008000, 0x02000},
10719 { 0x00010000, 0x0c000},
10720 { 0xffffffff, 0x00000}
10721 }, mem_tbl_5906[] = {
10722 { 0x00000200, 0x00008},
10723 { 0x00004000, 0x00400},
10724 { 0x00006000, 0x00400},
10725 { 0x00008000, 0x01000},
10726 { 0x00010000, 0x01000},
10727 { 0xffffffff, 0x00000}
10728 }, mem_tbl_5717[] = {
10729 { 0x00000200, 0x00008},
10730 { 0x00010000, 0x0a000},
10731 { 0x00020000, 0x13c00},
10732 { 0xffffffff, 0x00000}
10733 }, mem_tbl_57765[] = {
10734 { 0x00000200, 0x00008},
10735 { 0x00004000, 0x00800},
10736 { 0x00006000, 0x09800},
10737 { 0x00010000, 0x0a000},
10738 { 0xffffffff, 0x00000}
10740 struct mem_entry *mem_tbl;
10744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
10745 mem_tbl = mem_tbl_5717;
10746 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10747 mem_tbl = mem_tbl_57765;
10748 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10749 mem_tbl = mem_tbl_5755;
10750 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10751 mem_tbl = mem_tbl_5906;
10752 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10753 mem_tbl = mem_tbl_5705;
10755 mem_tbl = mem_tbl_570x;
10757 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10758 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10759 mem_tbl[i].len)) != 0)
10766 #define TG3_MAC_LOOPBACK 0
10767 #define TG3_PHY_LOOPBACK 1
10769 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10771 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10772 u32 desc_idx, coal_now;
10773 struct sk_buff *skb, *rx_skb;
10776 int num_pkts, tx_len, rx_len, i, err;
10777 struct tg3_rx_buffer_desc *desc;
10778 struct tg3_napi *tnapi, *rnapi;
10779 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10781 if (tp->irq_cnt > 1) {
10782 tnapi = &tp->napi[1];
10783 rnapi = &tp->napi[1];
10785 tnapi = &tp->napi[0];
10786 rnapi = &tp->napi[0];
10788 coal_now = tnapi->coal_now | rnapi->coal_now;
10790 if (loopback_mode == TG3_MAC_LOOPBACK) {
10791 /* HW errata - mac loopback fails in some cases on 5780.
10792 * Normal traffic and PHY loopback are not affected by
10795 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10798 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10799 MAC_MODE_PORT_INT_LPBACK;
10800 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10801 mac_mode |= MAC_MODE_LINK_POLARITY;
10802 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10803 mac_mode |= MAC_MODE_PORT_MODE_MII;
10805 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10806 tw32(MAC_MODE, mac_mode);
10807 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10810 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10811 tg3_phy_fet_toggle_apd(tp, false);
10812 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10814 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10816 tg3_phy_toggle_automdix(tp, 0);
10818 tg3_writephy(tp, MII_BMCR, val);
10821 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10822 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10824 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
10825 mac_mode |= MAC_MODE_PORT_MODE_MII;
10827 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10829 /* reset to prevent losing 1st rx packet intermittently */
10830 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10831 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10833 tw32_f(MAC_RX_MODE, tp->rx_mode);
10835 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10836 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10837 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10838 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10839 mac_mode |= MAC_MODE_LINK_POLARITY;
10840 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10841 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10843 tw32(MAC_MODE, mac_mode);
10851 skb = netdev_alloc_skb(tp->dev, tx_len);
10855 tx_data = skb_put(skb, tx_len);
10856 memcpy(tx_data, tp->dev->dev_addr, 6);
10857 memset(tx_data + 6, 0x0, 8);
10859 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10861 for (i = 14; i < tx_len; i++)
10862 tx_data[i] = (u8) (i & 0xff);
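/* Test frame layout: own MAC as destination so the looped-back frame
 * passes address filtering, zeroed source/ethertype, then an
 * incrementing byte pattern that the receive side verifies below.
 */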
10864 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10865 if (pci_dma_mapping_error(tp->pdev, map)) {
10866 dev_kfree_skb(skb);
10870 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10875 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10879 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10884 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10885 tr32_mailbox(tnapi->prodmbox);
10889 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10890 for (i = 0; i < 35; i++) {
10891 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10896 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10897 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10898 if ((tx_idx == tnapi->tx_prod) &&
10899 (rx_idx == (rx_start_idx + num_pkts)))
10903 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10904 dev_kfree_skb(skb);
10906 if (tx_idx != tnapi->tx_prod)
10909 if (rx_idx != rx_start_idx + num_pkts)
10912 desc = &rnapi->rx_rcb[rx_start_idx];
10913 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10914 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10915 if (opaque_key != RXD_OPAQUE_RING_STD)
10918 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10919 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10922 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10923 if (rx_len != tx_len)
10926 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10928 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10929 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10931 for (i = 14; i < tx_len; i++) {
10932 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10937 /* tg3_free_rings will unmap and free the rx_skb */
10942 #define TG3_MAC_LOOPBACK_FAILED 1
10943 #define TG3_PHY_LOOPBACK_FAILED 2
10944 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10945 TG3_PHY_LOOPBACK_FAILED)
10947 static int tg3_test_loopback(struct tg3 *tp)
10952 if (!netif_running(tp->dev))
10953 return TG3_LOOPBACK_FAILED;
10955 err = tg3_reset_hw(tp, 1);
10957 return TG3_LOOPBACK_FAILED;
10959 /* Turn off gphy autopowerdown. */
10960 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10961 tg3_phy_toggle_apd(tp, false);
10963 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10967 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10969 /* Wait for up to 40 microseconds to acquire lock. */
10970 for (i = 0; i < 4; i++) {
10971 status = tr32(TG3_CPMU_MUTEX_GNT);
10972 if (status == CPMU_MUTEX_GNT_DRIVER)
10977 if (status != CPMU_MUTEX_GNT_DRIVER)
10978 return TG3_LOOPBACK_FAILED;
10980 /* Turn off link-based power management. */
10981 cpmuctrl = tr32(TG3_CPMU_CTRL);
10982 tw32(TG3_CPMU_CTRL,
10983 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10984 CPMU_CTRL_LINK_AWARE_MODE));
10987 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10988 err |= TG3_MAC_LOOPBACK_FAILED;
10990 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10991 tw32(TG3_CPMU_CTRL, cpmuctrl);
10993 /* Release the mutex */
10994 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10997 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10998 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10999 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
11000 err |= TG3_PHY_LOOPBACK_FAILED;
11003 /* Re-enable gphy autopowerdown. */
11004 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
11005 tg3_phy_toggle_apd(tp, true);
11010 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11013 struct tg3 *tp = netdev_priv(dev);
11015 if (tp->link_config.phy_is_low_power)
11016 tg3_set_power_state(tp, PCI_D0);
11018 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
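/* data[] is indexed by test: 0 nvram, 1 link, 2 registers, 3 memory,
 * 4 loopback, 5 interrupt; the elided assignments set the matching
 * slot to 1 on failure.
 */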
11020 if (tg3_test_nvram(tp) != 0) {
11021 etest->flags |= ETH_TEST_FL_FAILED;
11024 if (tg3_test_link(tp) != 0) {
11025 etest->flags |= ETH_TEST_FL_FAILED;
11028 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11029 int err, err2 = 0, irq_sync = 0;
11031 if (netif_running(dev)) {
11033 tg3_netif_stop(tp);
11037 tg3_full_lock(tp, irq_sync);
11039 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11040 err = tg3_nvram_lock(tp);
11041 tg3_halt_cpu(tp, RX_CPU_BASE);
11042 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11043 tg3_halt_cpu(tp, TX_CPU_BASE);
11045 tg3_nvram_unlock(tp);
11047 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
11050 if (tg3_test_registers(tp) != 0) {
11051 etest->flags |= ETH_TEST_FL_FAILED;
11054 if (tg3_test_memory(tp) != 0) {
11055 etest->flags |= ETH_TEST_FL_FAILED;
11058 if ((data[4] = tg3_test_loopback(tp)) != 0)
11059 etest->flags |= ETH_TEST_FL_FAILED;
11061 tg3_full_unlock(tp);
11063 if (tg3_test_interrupt(tp) != 0) {
11064 etest->flags |= ETH_TEST_FL_FAILED;
11068 tg3_full_lock(tp, 0);
11070 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11071 if (netif_running(dev)) {
11072 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11073 err2 = tg3_restart_hw(tp, 1);
11075 tg3_netif_start(tp);
11078 tg3_full_unlock(tp);
11080 if (irq_sync && !err2)
11083 if (tp->link_config.phy_is_low_power)
11084 tg3_set_power_state(tp, PCI_D3hot);
11088 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11090 struct mii_ioctl_data *data = if_mii(ifr);
11091 struct tg3 *tp = netdev_priv(dev);
11094 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
11095 struct phy_device *phydev;
11096 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
11098 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11099 return phy_mii_ioctl(phydev, data, cmd);
11104 data->phy_id = tp->phy_addr;
11107 case SIOCGMIIREG: {
11110 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11111 break; /* We have no PHY */
11113 if (tp->link_config.phy_is_low_power)
11116 spin_lock_bh(&tp->lock);
11117 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11118 spin_unlock_bh(&tp->lock);
11120 data->val_out = mii_regval;
11126 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11127 break; /* We have no PHY */
11129 if (tp->link_config.phy_is_low_power)
11132 spin_lock_bh(&tp->lock);
11133 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11134 spin_unlock_bh(&tp->lock);
11142 return -EOPNOTSUPP;
11145 #if TG3_VLAN_TAG_USED
11146 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
11148 struct tg3 *tp = netdev_priv(dev);
11150 if (!netif_running(dev)) {
11155 tg3_netif_stop(tp);
11157 tg3_full_lock(tp, 0);
11161 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11162 __tg3_set_rx_mode(dev);
11164 tg3_netif_start(tp);
11166 tg3_full_unlock(tp);
11170 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11172 struct tg3 *tp = netdev_priv(dev);
11174 memcpy(ec, &tp->coal, sizeof(*ec));
11178 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11180 struct tg3 *tp = netdev_priv(dev);
11181 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11182 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11184 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11185 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11186 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11187 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11188 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11191 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11192 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11193 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11194 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11195 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11196 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11197 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11198 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11199 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11200 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11203 /* No rx interrupts will be generated if both are zero */
11204 if ((ec->rx_coalesce_usecs == 0) &&
11205 (ec->rx_max_coalesced_frames == 0))
11208 /* No tx interrupts will be generated if both are zero */
11209 if ((ec->tx_coalesce_usecs == 0) &&
11210 (ec->tx_max_coalesced_frames == 0))
11213 /* Only copy relevant parameters, ignore all others. */
11214 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11215 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11216 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11217 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11218 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11219 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11220 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11221 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11222 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11224 if (netif_running(dev)) {
11225 tg3_full_lock(tp, 0);
11226 __tg3_set_coalesce(tp, &tp->coal);
11227 tg3_full_unlock(tp);
11232 static const struct ethtool_ops tg3_ethtool_ops = {
11233 .get_settings = tg3_get_settings,
11234 .set_settings = tg3_set_settings,
11235 .get_drvinfo = tg3_get_drvinfo,
11236 .get_regs_len = tg3_get_regs_len,
11237 .get_regs = tg3_get_regs,
11238 .get_wol = tg3_get_wol,
11239 .set_wol = tg3_set_wol,
11240 .get_msglevel = tg3_get_msglevel,
11241 .set_msglevel = tg3_set_msglevel,
11242 .nway_reset = tg3_nway_reset,
11243 .get_link = ethtool_op_get_link,
11244 .get_eeprom_len = tg3_get_eeprom_len,
11245 .get_eeprom = tg3_get_eeprom,
11246 .set_eeprom = tg3_set_eeprom,
11247 .get_ringparam = tg3_get_ringparam,
11248 .set_ringparam = tg3_set_ringparam,
11249 .get_pauseparam = tg3_get_pauseparam,
11250 .set_pauseparam = tg3_set_pauseparam,
11251 .get_rx_csum = tg3_get_rx_csum,
11252 .set_rx_csum = tg3_set_rx_csum,
11253 .set_tx_csum = tg3_set_tx_csum,
11254 .set_sg = ethtool_op_set_sg,
11255 .set_tso = tg3_set_tso,
11256 .self_test = tg3_self_test,
11257 .get_strings = tg3_get_strings,
11258 .phys_id = tg3_phys_id,
11259 .get_ethtool_stats = tg3_get_ethtool_stats,
11260 .get_coalesce = tg3_get_coalesce,
11261 .set_coalesce = tg3_set_coalesce,
11262 .get_sset_count = tg3_get_sset_count,
11265 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11267 u32 cursize, val, magic;
11269 tp->nvram_size = EEPROM_CHIP_SIZE;
11271 if (tg3_nvram_read(tp, 0, &magic) != 0)
11274 if ((magic != TG3_EEPROM_MAGIC) &&
11275 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11276 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11280 * Size the chip by reading offsets at increasing powers of two.
11281 * When we encounter our validation signature, we know the addressing
11282 * has wrapped around, and thus we have our chip size.
11286 while (cursize < tp->nvram_size) {
11287 if (tg3_nvram_read(tp, cursize, &val) != 0)
11296 tp->nvram_size = cursize;
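/*
 * Editor's sketch of the sizing loop above (hypothetical helper, not
 * part of the original driver): keep doubling the probe offset until
 * the signature stored at offset 0 is read back, which means the
 * addressing wrapped and the current offset is the device size.
 */
static u32 __devinit tg3_nvram_wrap_size(struct tg3 *tp, u32 magic, u32 limit)
{
	u32 cursize = 0x10;
	u32 val;

	while (cursize < limit) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return 0;
		if (val == magic)	/* addressing wrapped to offset 0 */
			break;
		cursize <<= 1;
	}
	return cursize;
}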
11299 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11303 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11304 tg3_nvram_read(tp, 0, &val) != 0)
11307 /* Selfboot format */
11308 if (val != TG3_EEPROM_MAGIC) {
11309 tg3_get_eeprom_size(tp);
11313 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11315 /* This is confusing. We want to operate on the
11316 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11317 * call will read from NVRAM and byteswap the data
11318 * according to the byteswapping settings for all
11319 * other register accesses. This ensures the data we
11320 * want will always reside in the lower 16-bits.
11321 * However, the data in NVRAM is in LE format, which
11322 * means the data from the NVRAM read will always be
11323 * opposite the endianness of the CPU. The 16-bit
11324 * byteswap then brings the data to CPU endianness.
11326 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11330 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
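/*
 * Worked example for the swab16() above (editor's note): a 512 KB
 * selfboot image stores 512 (0x0200) little-endian in the 16-bit
 * field at offset 0xf2.  Since the NVRAM read path always leaves the
 * data opposite the CPU's endianness, the code sees 0x0002;
 * swab16(0x0002) == 0x0200, and 0x0200 * 1024 == 512 KB.
 */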
11333 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11337 nvcfg1 = tr32(NVRAM_CFG1);
11338 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11339 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11341 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11342 tw32(NVRAM_CFG1, nvcfg1);
11345 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11346 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11347 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11348 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11349 tp->nvram_jedecnum = JEDEC_ATMEL;
11350 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11351 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11353 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11354 tp->nvram_jedecnum = JEDEC_ATMEL;
11355 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11357 case FLASH_VENDOR_ATMEL_EEPROM:
11358 tp->nvram_jedecnum = JEDEC_ATMEL;
11359 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11360 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11362 case FLASH_VENDOR_ST:
11363 tp->nvram_jedecnum = JEDEC_ST;
11364 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11365 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11367 case FLASH_VENDOR_SAIFUN:
11368 tp->nvram_jedecnum = JEDEC_SAIFUN;
11369 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11371 case FLASH_VENDOR_SST_SMALL:
11372 case FLASH_VENDOR_SST_LARGE:
11373 tp->nvram_jedecnum = JEDEC_SST;
11374 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11378 tp->nvram_jedecnum = JEDEC_ATMEL;
11379 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11380 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11384 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11386 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11387 case FLASH_5752PAGE_SIZE_256:
11388 tp->nvram_pagesize = 256;
11390 case FLASH_5752PAGE_SIZE_512:
11391 tp->nvram_pagesize = 512;
11393 case FLASH_5752PAGE_SIZE_1K:
11394 tp->nvram_pagesize = 1024;
11396 case FLASH_5752PAGE_SIZE_2K:
11397 tp->nvram_pagesize = 2048;
11399 case FLASH_5752PAGE_SIZE_4K:
11400 tp->nvram_pagesize = 4096;
11402 case FLASH_5752PAGE_SIZE_264:
11403 tp->nvram_pagesize = 264;
11405 case FLASH_5752PAGE_SIZE_528:
11406 tp->nvram_pagesize = 528;
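/*
 * Editor's note: 264 and 528 bytes are Atmel AT45DB-style DataFlash
 * pages (256+8 and 512+16 bytes).  Such parts are addressed as
 * (page, byte-within-page) pairs rather than linearly, which is why
 * later code only keeps NVRAM address translation enabled when the
 * page size is 264 or 528 (see the TG3_FLG3_NO_NVRAM_ADDR_TRANS
 * checks below).
 */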
11411 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11415 nvcfg1 = tr32(NVRAM_CFG1);
11417 /* NVRAM protection for TPM */
11418 if (nvcfg1 & (1 << 27))
11419 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11421 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11422 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11423 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11424 tp->nvram_jedecnum = JEDEC_ATMEL;
11425 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11427 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11428 tp->nvram_jedecnum = JEDEC_ATMEL;
11429 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11430 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11432 case FLASH_5752VENDOR_ST_M45PE10:
11433 case FLASH_5752VENDOR_ST_M45PE20:
11434 case FLASH_5752VENDOR_ST_M45PE40:
11435 tp->nvram_jedecnum = JEDEC_ST;
11436 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11437 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11441 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11442 tg3_nvram_get_pagesize(tp, nvcfg1);
11444 /* For eeprom, set pagesize to maximum eeprom size */
11445 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11447 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11448 tw32(NVRAM_CFG1, nvcfg1);
11452 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11454 u32 nvcfg1, protect = 0;
11456 nvcfg1 = tr32(NVRAM_CFG1);
11458 /* NVRAM protection for TPM */
11459 if (nvcfg1 & (1 << 27)) {
11460 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11464 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11466 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11467 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11468 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11469 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11470 tp->nvram_jedecnum = JEDEC_ATMEL;
11471 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11472 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11473 tp->nvram_pagesize = 264;
11474 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11475 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11476 tp->nvram_size = (protect ? 0x3e200 :
11477 TG3_NVRAM_SIZE_512KB);
11478 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11479 tp->nvram_size = (protect ? 0x1f200 :
11480 TG3_NVRAM_SIZE_256KB);
11482 tp->nvram_size = (protect ? 0x1f200 :
11483 TG3_NVRAM_SIZE_128KB);
11485 case FLASH_5752VENDOR_ST_M45PE10:
11486 case FLASH_5752VENDOR_ST_M45PE20:
11487 case FLASH_5752VENDOR_ST_M45PE40:
11488 tp->nvram_jedecnum = JEDEC_ST;
11489 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11490 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11491 tp->nvram_pagesize = 256;
11492 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11493 tp->nvram_size = (protect ?
11494 TG3_NVRAM_SIZE_64KB :
11495 TG3_NVRAM_SIZE_128KB);
11496 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11497 tp->nvram_size = (protect ?
11498 TG3_NVRAM_SIZE_64KB :
11499 TG3_NVRAM_SIZE_256KB);
11501 tp->nvram_size = (protect ?
11502 TG3_NVRAM_SIZE_128KB :
11503 TG3_NVRAM_SIZE_512KB);
11508 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11512 nvcfg1 = tr32(NVRAM_CFG1);
11514 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11515 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11516 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11517 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11518 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11519 tp->nvram_jedecnum = JEDEC_ATMEL;
11520 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11521 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11523 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11524 tw32(NVRAM_CFG1, nvcfg1);
11526 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11527 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11528 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11529 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11530 tp->nvram_jedecnum = JEDEC_ATMEL;
11531 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11532 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11533 tp->nvram_pagesize = 264;
11535 case FLASH_5752VENDOR_ST_M45PE10:
11536 case FLASH_5752VENDOR_ST_M45PE20:
11537 case FLASH_5752VENDOR_ST_M45PE40:
11538 tp->nvram_jedecnum = JEDEC_ST;
11539 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11540 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11541 tp->nvram_pagesize = 256;
11546 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11548 u32 nvcfg1, protect = 0;
11550 nvcfg1 = tr32(NVRAM_CFG1);
11552 /* NVRAM protection for TPM */
11553 if (nvcfg1 & (1 << 27)) {
11554 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11558 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11560 case FLASH_5761VENDOR_ATMEL_ADB021D:
11561 case FLASH_5761VENDOR_ATMEL_ADB041D:
11562 case FLASH_5761VENDOR_ATMEL_ADB081D:
11563 case FLASH_5761VENDOR_ATMEL_ADB161D:
11564 case FLASH_5761VENDOR_ATMEL_MDB021D:
11565 case FLASH_5761VENDOR_ATMEL_MDB041D:
11566 case FLASH_5761VENDOR_ATMEL_MDB081D:
11567 case FLASH_5761VENDOR_ATMEL_MDB161D:
11568 tp->nvram_jedecnum = JEDEC_ATMEL;
11569 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11570 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11571 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11572 tp->nvram_pagesize = 256;
11574 case FLASH_5761VENDOR_ST_A_M45PE20:
11575 case FLASH_5761VENDOR_ST_A_M45PE40:
11576 case FLASH_5761VENDOR_ST_A_M45PE80:
11577 case FLASH_5761VENDOR_ST_A_M45PE16:
11578 case FLASH_5761VENDOR_ST_M_M45PE20:
11579 case FLASH_5761VENDOR_ST_M_M45PE40:
11580 case FLASH_5761VENDOR_ST_M_M45PE80:
11581 case FLASH_5761VENDOR_ST_M_M45PE16:
11582 tp->nvram_jedecnum = JEDEC_ST;
11583 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11584 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11585 tp->nvram_pagesize = 256;
11590 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11593 case FLASH_5761VENDOR_ATMEL_ADB161D:
11594 case FLASH_5761VENDOR_ATMEL_MDB161D:
11595 case FLASH_5761VENDOR_ST_A_M45PE16:
11596 case FLASH_5761VENDOR_ST_M_M45PE16:
11597 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11599 case FLASH_5761VENDOR_ATMEL_ADB081D:
11600 case FLASH_5761VENDOR_ATMEL_MDB081D:
11601 case FLASH_5761VENDOR_ST_A_M45PE80:
11602 case FLASH_5761VENDOR_ST_M_M45PE80:
11603 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11605 case FLASH_5761VENDOR_ATMEL_ADB041D:
11606 case FLASH_5761VENDOR_ATMEL_MDB041D:
11607 case FLASH_5761VENDOR_ST_A_M45PE40:
11608 case FLASH_5761VENDOR_ST_M_M45PE40:
11609 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11611 case FLASH_5761VENDOR_ATMEL_ADB021D:
11612 case FLASH_5761VENDOR_ATMEL_MDB021D:
11613 case FLASH_5761VENDOR_ST_A_M45PE20:
11614 case FLASH_5761VENDOR_ST_M_M45PE20:
11615 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11621 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11623 tp->nvram_jedecnum = JEDEC_ATMEL;
11624 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11625 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11628 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11632 nvcfg1 = tr32(NVRAM_CFG1);
11634 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11635 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11636 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11637 tp->nvram_jedecnum = JEDEC_ATMEL;
11638 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11639 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11641 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11642 tw32(NVRAM_CFG1, nvcfg1);
11644 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11645 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11646 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11647 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11648 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11649 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11650 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11651 tp->nvram_jedecnum = JEDEC_ATMEL;
11652 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11653 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11655 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11656 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11657 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11658 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11659 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11661 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11662 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11663 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11665 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11666 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11667 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11671 case FLASH_5752VENDOR_ST_M45PE10:
11672 case FLASH_5752VENDOR_ST_M45PE20:
11673 case FLASH_5752VENDOR_ST_M45PE40:
11674 tp->nvram_jedecnum = JEDEC_ST;
11675 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11676 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11678 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11679 case FLASH_5752VENDOR_ST_M45PE10:
11680 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11682 case FLASH_5752VENDOR_ST_M45PE20:
11683 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11685 case FLASH_5752VENDOR_ST_M45PE40:
11686 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11691 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11695 tg3_nvram_get_pagesize(tp, nvcfg1);
11696 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11697 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11701 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11705 nvcfg1 = tr32(NVRAM_CFG1);
11707 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11708 case FLASH_5717VENDOR_ATMEL_EEPROM:
11709 case FLASH_5717VENDOR_MICRO_EEPROM:
11710 tp->nvram_jedecnum = JEDEC_ATMEL;
11711 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11712 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11714 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11715 tw32(NVRAM_CFG1, nvcfg1);
11717 case FLASH_5717VENDOR_ATMEL_MDB011D:
11718 case FLASH_5717VENDOR_ATMEL_ADB011B:
11719 case FLASH_5717VENDOR_ATMEL_ADB011D:
11720 case FLASH_5717VENDOR_ATMEL_MDB021D:
11721 case FLASH_5717VENDOR_ATMEL_ADB021B:
11722 case FLASH_5717VENDOR_ATMEL_ADB021D:
11723 case FLASH_5717VENDOR_ATMEL_45USPT:
11724 tp->nvram_jedecnum = JEDEC_ATMEL;
11725 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11726 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11728 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11729 case FLASH_5717VENDOR_ATMEL_MDB021D:
11730 case FLASH_5717VENDOR_ATMEL_ADB021B:
11731 case FLASH_5717VENDOR_ATMEL_ADB021D:
11732 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11735 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11739 case FLASH_5717VENDOR_ST_M_M25PE10:
11740 case FLASH_5717VENDOR_ST_A_M25PE10:
11741 case FLASH_5717VENDOR_ST_M_M45PE10:
11742 case FLASH_5717VENDOR_ST_A_M45PE10:
11743 case FLASH_5717VENDOR_ST_M_M25PE20:
11744 case FLASH_5717VENDOR_ST_A_M25PE20:
11745 case FLASH_5717VENDOR_ST_M_M45PE20:
11746 case FLASH_5717VENDOR_ST_A_M45PE20:
11747 case FLASH_5717VENDOR_ST_25USPT:
11748 case FLASH_5717VENDOR_ST_45USPT:
11749 tp->nvram_jedecnum = JEDEC_ST;
11750 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11751 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11753 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11754 case FLASH_5717VENDOR_ST_M_M25PE20:
11755 case FLASH_5717VENDOR_ST_A_M25PE20:
11756 case FLASH_5717VENDOR_ST_M_M45PE20:
11757 case FLASH_5717VENDOR_ST_A_M45PE20:
11758 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11761 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11766 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11770 tg3_nvram_get_pagesize(tp, nvcfg1);
11771 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11772 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11775 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11776 static void __devinit tg3_nvram_init(struct tg3 *tp)
11778 tw32_f(GRC_EEPROM_ADDR,
11779 (EEPROM_ADDR_FSM_RESET |
11780 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11781 EEPROM_ADDR_CLKPERD_SHIFT)));
11785 /* Enable seeprom accesses. */
11786 tw32_f(GRC_LOCAL_CTRL,
11787 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11790 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11791 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11792 tp->tg3_flags |= TG3_FLAG_NVRAM;
11794 if (tg3_nvram_lock(tp)) {
11795 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
11796 "tg3_nvram_init failed.\n", tp->dev->name);
11799 tg3_enable_nvram_access(tp);
11801 tp->nvram_size = 0;
11803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11804 tg3_get_5752_nvram_info(tp);
11805 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11806 tg3_get_5755_nvram_info(tp);
11807 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11808 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11809 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11810 tg3_get_5787_nvram_info(tp);
11811 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11812 tg3_get_5761_nvram_info(tp);
11813 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11814 tg3_get_5906_nvram_info(tp);
11815 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11816 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11817 tg3_get_57780_nvram_info(tp);
11818 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
11819 tg3_get_5717_nvram_info(tp);
11821 tg3_get_nvram_info(tp);
11823 if (tp->nvram_size == 0)
11824 tg3_get_nvram_size(tp);
11826 tg3_disable_nvram_access(tp);
11827 tg3_nvram_unlock(tp);
11830 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11832 tg3_get_eeprom_size(tp);
11836 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11837 u32 offset, u32 len, u8 *buf)
11842 for (i = 0; i < len; i += 4) {
11848 memcpy(&data, buf + i, 4);
11851 * The SEEPROM interface expects the data to always be opposite
11852 * the native endian format. We accomplish this by reversing
11853 * all the operations that would have been performed on the
11854 * data from a call to tg3_nvram_read_be32().
11856 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11858 val = tr32(GRC_EEPROM_ADDR);
11859 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11861 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11863 tw32(GRC_EEPROM_ADDR, val |
11864 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11865 (addr & EEPROM_ADDR_ADDR_MASK) |
11866 EEPROM_ADDR_START |
11867 EEPROM_ADDR_WRITE);
11869 for (j = 0; j < 1000; j++) {
11870 val = tr32(GRC_EEPROM_ADDR);
11872 if (val & EEPROM_ADDR_COMPLETE)
11876 if (!(val & EEPROM_ADDR_COMPLETE)) {
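/*
 * Editor's sketch of the completion poll used above (hypothetical
 * helper, not in the original source): spin on GRC_EEPROM_ADDR,
 * sleeping between reads, and report -EBUSY if the COMPLETE bit
 * never appears.
 */
static int tg3_eeprom_wait_complete(struct tg3 *tp)
{
	int j;

	for (j = 0; j < 1000; j++) {
		if (tr32(GRC_EEPROM_ADDR) & EEPROM_ADDR_COMPLETE)
			return 0;
		msleep(1);
	}
	return -EBUSY;
}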
11885 /* offset and length are dword aligned */
11886 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11890 u32 pagesize = tp->nvram_pagesize;
11891 u32 pagemask = pagesize - 1;
11895 tmp = kmalloc(pagesize, GFP_KERNEL);
11901 u32 phy_addr, page_off, size;
11903 phy_addr = offset & ~pagemask;
11905 for (j = 0; j < pagesize; j += 4) {
11906 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11907 (__be32 *) (tmp + j));
11914 page_off = offset & pagemask;
11921 memcpy(tmp + page_off, buf, size);
11923 offset = offset + (pagesize - page_off);
11925 tg3_enable_nvram_access(tp);
11928 * Before we can erase the flash page, we need
11929 * to issue a special "write enable" command.
11931 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11933 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11936 /* Erase the target page */
11937 tw32(NVRAM_ADDR, phy_addr);
11939 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11940 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11942 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11945 /* Issue another write enable to start the write. */
11946 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11948 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11951 for (j = 0; j < pagesize; j += 4) {
11954 data = *((__be32 *) (tmp + j));
11956 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11958 tw32(NVRAM_ADDR, phy_addr + j);
11960 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11964 nvram_cmd |= NVRAM_CMD_FIRST;
11965 else if (j == (pagesize - 4))
11966 nvram_cmd |= NVRAM_CMD_LAST;
11968 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11975 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11976 tg3_nvram_exec_cmd(tp, nvram_cmd);
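/*
 * Summary of the unbuffered write protocol above (editor's note):
 * each pass (1) reads the whole flash page containing the target
 * offset into a bounce buffer, (2) merges the caller's data into it,
 * (3) issues a write-enable followed by a page erase, then (4) issues
 * another write-enable and programs the page back one dword at a
 * time, flagging the first and last dwords with NVRAM_CMD_FIRST and
 * NVRAM_CMD_LAST.  The trailing NVRAM_CMD_WRDI leaves the part
 * write-disabled again.
 */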
11983 /* offset and length are dword aligned */
11984 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11989 for (i = 0; i < len; i += 4, offset += 4) {
11990 u32 page_off, phy_addr, nvram_cmd;
11993 memcpy(&data, buf + i, 4);
11994 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11996 page_off = offset % tp->nvram_pagesize;
11998 phy_addr = tg3_nvram_phys_addr(tp, offset);
12000 tw32(NVRAM_ADDR, phy_addr);
12002 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12004 if ((page_off == 0) || (i == 0))
12005 nvram_cmd |= NVRAM_CMD_FIRST;
12006 if (page_off == (tp->nvram_pagesize - 4))
12007 nvram_cmd |= NVRAM_CMD_LAST;
12009 if (i == (len - 4))
12010 nvram_cmd |= NVRAM_CMD_LAST;
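/* Worked example (editor's note): with a 264-byte page, offset 264
 * gives page_off == 0, so that dword is tagged NVRAM_CMD_FIRST;
 * offset 524 gives page_off == 260 == pagesize - 4, so it is tagged
 * NVRAM_CMD_LAST.
 */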
12012 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12013 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
12014 (tp->nvram_jedecnum == JEDEC_ST) &&
12015 (nvram_cmd & NVRAM_CMD_FIRST)) {
12017 if ((ret = tg3_nvram_exec_cmd(tp,
12018 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12023 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12024 /* We always do complete word writes to eeprom. */
12025 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12028 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12034 /* offset and length are dword aligned */
12035 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12039 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12040 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12041 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12045 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
12046 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12051 ret = tg3_nvram_lock(tp);
12055 tg3_enable_nvram_access(tp);
12056 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
12057 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
12058 tw32(NVRAM_WRITE1, 0x406);
12060 grc_mode = tr32(GRC_MODE);
12061 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12063 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
12064 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12066 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12070 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12074 grc_mode = tr32(GRC_MODE);
12075 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12077 tg3_disable_nvram_access(tp);
12078 tg3_nvram_unlock(tp);
12081 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12082 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12089 struct subsys_tbl_ent {
12090 u16 subsys_vendor, subsys_devid;
12094 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
12095 /* Broadcom boards. */
12096 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
12097 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
12098 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
12099 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
12100 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
12101 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
12102 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
12103 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
12104 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
12105 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
12106 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
12109 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
12110 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
12111 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
12112 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
12113 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
12116 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
12117 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
12118 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
12119 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
12121 /* Compaq boards. */
12122 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
12123 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
12124 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
12125 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
12126 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
12129 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
12132 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
12136 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12137 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12138 tp->pdev->subsystem_vendor) &&
12139 (subsys_id_to_phy_id[i].subsys_devid ==
12140 tp->pdev->subsystem_device))
12141 return &subsys_id_to_phy_id[i];
12146 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12151 /* On some early chips the SRAM cannot be accessed in D3hot state,
12152 * so we need to make sure we're in D0.
12154 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12155 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12156 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12159 /* Make sure register accesses (indirect or otherwise)
12160 * will function correctly.
12162 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12163 tp->misc_host_ctrl);
12165 /* The memory arbiter has to be enabled in order for SRAM accesses
12166 * to succeed. Normally on powerup the tg3 chip firmware will make
12167 * sure it is enabled, but other entities such as system netboot
12168 * code might disable it.
12170 val = tr32(MEMARB_MODE);
12171 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12173 tp->phy_id = PHY_ID_INVALID;
12174 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12176 /* Assume an onboard device and WOL capable by default. */
12177 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12179 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12180 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12181 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12182 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12184 val = tr32(VCPU_CFGSHDW);
12185 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12186 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12187 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12188 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12189 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12193 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12194 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12195 u32 nic_cfg, led_cfg;
12196 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12197 int eeprom_phy_serdes = 0;
12199 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12200 tp->nic_sram_data_cfg = nic_cfg;
12202 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12203 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12204 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12205 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12206 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12207 (ver > 0) && (ver < 0x100))
12208 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12211 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12213 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12214 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12215 eeprom_phy_serdes = 1;
12217 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12218 if (nic_phy_id != 0) {
12219 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12220 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12222 eeprom_phy_id = (id1 >> 16) << 10;
12223 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12224 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12228 tp->phy_id = eeprom_phy_id;
12229 if (eeprom_phy_serdes) {
12230 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
12231 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12232 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12234 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12237 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12238 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12239 SHASTA_EXT_LED_MODE_MASK);
12241 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12245 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12246 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12249 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12250 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12253 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12254 tp->led_ctrl = LED_CTRL_MODE_MAC;
12256 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12257 * read on some older 5700/5701 bootcode.
12259 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12260 ASIC_REV_5700 ||
12261 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12262 ASIC_REV_5701)
12263 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12267 case SHASTA_EXT_LED_SHARED:
12268 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12269 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12270 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12271 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12272 LED_CTRL_MODE_PHY_2);
12275 case SHASTA_EXT_LED_MAC:
12276 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12279 case SHASTA_EXT_LED_COMBO:
12280 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12281 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12282 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12283 LED_CTRL_MODE_PHY_2);
12288 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12290 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12291 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12293 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12294 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12296 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12297 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12298 if ((tp->pdev->subsystem_vendor ==
12299 PCI_VENDOR_ID_ARIMA) &&
12300 (tp->pdev->subsystem_device == 0x205a ||
12301 tp->pdev->subsystem_device == 0x2063))
12302 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12304 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12305 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12308 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12309 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12310 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12311 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12314 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12315 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12316 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12318 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
12319 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12320 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12322 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12323 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12324 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12326 if (cfg2 & (1 << 17))
12327 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
12329 /* serdes signal pre-emphasis in register 0x590 set by */
12330 /* bootcode if bit 18 is set */
12331 if (cfg2 & (1 << 18))
12332 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
12334 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12335 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12336 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12337 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
12339 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12342 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12343 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12344 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12347 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
12348 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
12349 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12350 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12351 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12352 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12355 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12356 device_set_wakeup_enable(&tp->pdev->dev,
12357 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12360 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12365 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12366 tw32(OTP_CTRL, cmd);
12368 /* Wait for up to 1 ms for command to execute. */
12369 for (i = 0; i < 100; i++) {
12370 val = tr32(OTP_STATUS);
12371 if (val & OTP_STATUS_CMD_DONE)
12376 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12379 /* Read the gphy configuration from the OTP region of the chip. The gphy
12380 * configuration is a 32-bit value that straddles the alignment boundary.
12381 * We do two 32-bit reads and then shift and merge the results.
12383 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12385 u32 bhalf_otp, thalf_otp;
12387 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12389 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12392 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12394 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12397 thalf_otp = tr32(OTP_READ_DATA);
12399 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12401 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12404 bhalf_otp = tr32(OTP_READ_DATA);
12406 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
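/*
 * Worked example for the merge above (editor's note): if the two
 * aligned OTP words read 0xAAAABBBB and 0xCCCCDDDD, then
 * ((0xAAAABBBB & 0x0000ffff) << 16) | (0xCCCCDDDD >> 16)
 * yields 0xBBBBCCCC, the 32-bit gphy config straddling the boundary.
 */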
12409 static int __devinit tg3_phy_probe(struct tg3 *tp)
12411 u32 hw_phy_id_1, hw_phy_id_2;
12412 u32 hw_phy_id, hw_phy_id_masked;
12415 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12416 return tg3_phy_init(tp);
12418 /* Reading the PHY ID register can conflict with ASF
12419 * firmware access to the PHY hardware.
12422 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12423 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12424 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
12426 /* Now read the physical PHY_ID from the chip and verify
12427 * that it is sane. If it doesn't look good, we fall back
12428 * to either the hard-coded table based PHY_ID and failing
12429 * that the value found in the eeprom area.
12431 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12432 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12434 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12435 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12436 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12438 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
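/* Editor's note on the packing above: MII_PHYSID1 and the top six
 * bits of MII_PHYSID2 carry the vendor OUI, while the low ten bits
 * of MII_PHYSID2 carry the model and revision.  The shifts rebuild
 * the driver's internal PHY_ID_* layout so that masking with
 * PHY_ID_MASK leaves a value comparable against the PHY_ID_BCM*
 * constants.
 */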
12441 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
12442 tp->phy_id = hw_phy_id;
12443 if (hw_phy_id_masked == PHY_ID_BCM8002)
12444 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12446 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
12448 if (tp->phy_id != PHY_ID_INVALID) {
12449 /* Do nothing, phy ID already set up in
12450 * tg3_get_eeprom_hw_cfg().
12453 struct subsys_tbl_ent *p;
12455 /* No eeprom signature? Try the hardcoded
12456 * subsys device table.
12458 p = lookup_by_subsys(tp);
12459 if (!p)
12460 return -ENODEV;
12462 tp->phy_id = p->phy_id;
12463 if (!tp->phy_id ||
12464 tp->phy_id == PHY_ID_BCM8002)
12465 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12469 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
12470 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12471 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12472 u32 bmsr, adv_reg, tg3_ctrl, mask;
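/* MII_BMSR latches link-down events, so it is read twice below;
 * the second read reflects the current link state (editor's note).
 */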
12474 tg3_readphy(tp, MII_BMSR, &bmsr);
12475 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12476 (bmsr & BMSR_LSTATUS))
12477 goto skip_phy_reset;
12479 err = tg3_phy_reset(tp);
12483 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12484 ADVERTISE_100HALF | ADVERTISE_100FULL |
12485 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12487 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
12488 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12489 MII_TG3_CTRL_ADV_1000_FULL);
12490 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12491 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12492 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12493 MII_TG3_CTRL_ENABLE_AS_MASTER);
12496 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12497 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12498 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12499 if (!tg3_copper_is_advertising_all(tp, mask)) {
12500 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12502 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12503 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12505 tg3_writephy(tp, MII_BMCR,
12506 BMCR_ANENABLE | BMCR_ANRESTART);
12508 tg3_phy_set_wirespeed(tp);
12510 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12511 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12512 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12516 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
12517 err = tg3_init_5401phy_dsp(tp);
12522 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
12523 err = tg3_init_5401phy_dsp(tp);
12526 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
12527 tp->link_config.advertising =
12528 (ADVERTISED_1000baseT_Half |
12529 ADVERTISED_1000baseT_Full |
12530 ADVERTISED_Autoneg |
12532 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
12533 tp->link_config.advertising &=
12534 ~(ADVERTISED_1000baseT_Half |
12535 ADVERTISED_1000baseT_Full);
12540 static void __devinit tg3_read_partno(struct tg3 *tp)
12542 unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */
12546 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12547 tg3_nvram_read(tp, 0x0, &magic))
12548 goto out_not_found;
12550 if (magic == TG3_EEPROM_MAGIC) {
12551 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12554 /* The data is in little-endian format in NVRAM.
12555 * Use the big-endian read routines to preserve
12556 * the byte order as it exists in NVRAM.
12558 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12559 goto out_not_found;
12561 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12565 unsigned int pos = 0, i = 0;
12567 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12568 cnt = pci_read_vpd(tp->pdev, pos,
12569 TG3_NVM_VPD_LEN - pos,
12570 &vpd_data[pos]);
12571 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12572 cnt = 0;
12573 else if (cnt < 0)
12574 goto out_not_found;
12576 if (pos != TG3_NVM_VPD_LEN)
12577 goto out_not_found;
12580 /* Now parse and find the part number. */
12581 for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) {
12582 unsigned char val = vpd_data[i];
12583 unsigned int block_end;
12585 if (val == 0x82 || val == 0x91) {
12586 i = (i + 3 +
12587 (vpd_data[i + 1] +
12588 (vpd_data[i + 2] << 8)));
12589 continue;
12590 }
12592 if (val != 0x90)
12593 goto out_not_found;
12595 block_end = (i + 3 +
12596 (vpd_data[i + 1] +
12597 (vpd_data[i + 2] << 8)));
12598 i += 3;
12600 if (block_end > TG3_NVM_VPD_LEN)
12601 goto out_not_found;
12603 while (i < (block_end - 2)) {
12604 if (vpd_data[i + 0] == 'P' &&
12605 vpd_data[i + 1] == 'N') {
12606 int partno_len = vpd_data[i + 2];
12609 if (partno_len > TG3_BPN_SIZE ||
12610 (partno_len + i) > TG3_NVM_VPD_LEN)
12611 goto out_not_found;
12613 memcpy(tp->board_part_number,
12614 &vpd_data[i], partno_len);
12619 i += 3 + vpd_data[i + 2];
12622 /* Part number not found. */
12623 goto out_not_found;
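/*
 * Editor's note on the VPD layout parsed above: the buffer is a
 * sequence of large-resource tags -- 0x82 (identifier string) and
 * 0x90/0x91 (read-only/read-write data) -- each followed by a 16-bit
 * little-endian length.  Within the read-only block, each keyword is
 * a 3-byte header (two ASCII characters plus a length byte) followed
 * by its payload; the "PN" keyword holds the board part number.
 */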
12627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12628 strcpy(tp->board_part_number, "BCM95906");
12629 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12630 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12631 strcpy(tp->board_part_number, "BCM57780");
12632 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12633 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12634 strcpy(tp->board_part_number, "BCM57760");
12635 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12636 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12637 strcpy(tp->board_part_number, "BCM57790");
12638 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12639 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12640 strcpy(tp->board_part_number, "BCM57788");
12641 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12642 strcpy(tp->board_part_number, "BCM57765");
12644 strcpy(tp->board_part_number, "none");
12647 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12651 if (tg3_nvram_read(tp, offset, &val) ||
12652 (val & 0xfc000000) != 0x0c000000 ||
12653 tg3_nvram_read(tp, offset + 4, &val) ||
12660 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12662 u32 val, offset, start, ver_offset;
12664 bool newver = false;
12666 if (tg3_nvram_read(tp, 0xc, &offset) ||
12667 tg3_nvram_read(tp, 0x4, &start))
12670 offset = tg3_nvram_logical_addr(tp, offset);
12672 if (tg3_nvram_read(tp, offset, &val))
12675 if ((val & 0xfc000000) == 0x0c000000) {
12676 if (tg3_nvram_read(tp, offset + 4, &val))
12684 if (tg3_nvram_read(tp, offset + 8, &ver_offset))
12687 offset = offset + ver_offset - start;
12688 for (i = 0; i < 16; i += 4) {
12690 if (tg3_nvram_read_be32(tp, offset + i, &v))
12693 memcpy(tp->fw_ver + i, &v, sizeof(v));
12698 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12701 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12702 TG3_NVM_BCVER_MAJSFT;
12703 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12704 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
12708 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12710 u32 val, major, minor;
12712 /* Use native endian representation */
12713 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12716 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12717 TG3_NVM_HWSB_CFG1_MAJSFT;
12718 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12719 TG3_NVM_HWSB_CFG1_MINSFT;
12721 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
12724 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12726 u32 offset, major, minor, build;
12728 tp->fw_ver[0] = 's';
12729 tp->fw_ver[1] = 'b';
12730 tp->fw_ver[2] = '\0';
12732 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12735 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12736 case TG3_EEPROM_SB_REVISION_0:
12737 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12739 case TG3_EEPROM_SB_REVISION_2:
12740 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12742 case TG3_EEPROM_SB_REVISION_3:
12743 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12749 if (tg3_nvram_read(tp, offset, &val))
12752 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12753 TG3_EEPROM_SB_EDH_BLD_SHFT;
12754 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12755 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12756 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12758 if (minor > 99 || build > 26)
12759 return;
12761 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
12763 if (build > 0) {
12764 tp->fw_ver[8] = 'a' + build - 1;
12765 tp->fw_ver[9] = '\0';
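/* Worked example (editor's note): major = 1, minor = 2, build = 3
 * produces "sb v1.02" from the snprintf above, and the build letter
 * 'a' + 3 - 1 == 'c' lands in fw_ver[8], giving "sb v1.02c".
 */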
12769 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12771 u32 val, offset, start;
12774 for (offset = TG3_NVM_DIR_START;
12775 offset < TG3_NVM_DIR_END;
12776 offset += TG3_NVM_DIRENT_SIZE) {
12777 if (tg3_nvram_read(tp, offset, &val))
12780 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12784 if (offset == TG3_NVM_DIR_END)
12787 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12788 start = 0x08000000;
12789 else if (tg3_nvram_read(tp, offset - 4, &start))
12792 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12793 !tg3_fw_img_is_valid(tp, offset) ||
12794 tg3_nvram_read(tp, offset + 8, &val))
12797 offset += val - start;
12799 vlen = strlen(tp->fw_ver);
12801 tp->fw_ver[vlen++] = ',';
12802 tp->fw_ver[vlen++] = ' ';
12804 for (i = 0; i < 4; i++) {
12806 if (tg3_nvram_read_be32(tp, offset, &v))
12809 offset += sizeof(v);
12811 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12812 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12816 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12821 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12826 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12827 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12830 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12831 if (apedata != APE_SEG_SIG_MAGIC)
12834 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12835 if (!(apedata & APE_FW_STATUS_READY))
12838 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12840 vlen = strlen(tp->fw_ver);
12842 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12843 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12844 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12845 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12846 (apedata & APE_FW_VERSION_BLDMSK));
12849 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12853 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12854 tp->fw_ver[0] = 's';
12855 tp->fw_ver[1] = 'b';
12856 tp->fw_ver[2] = '\0';
12861 if (tg3_nvram_read(tp, 0, &val))
12864 if (val == TG3_EEPROM_MAGIC)
12865 tg3_read_bc_ver(tp);
12866 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12867 tg3_read_sb_ver(tp, val);
12868 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12869 tg3_read_hwsb_ver(tp);
12873 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12874 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
12877 tg3_read_mgmtfw_ver(tp);
12879 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
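/* Editor's note: tp->fw_ver now holds the concatenated firmware id,
 * e.g. a bootcode version such as "v3.28" (hypothetical value),
 * optionally followed by ", <mgmt fw version>" and " DASH vW.X.Y.Z".
 */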
12882 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12884 static int __devinit tg3_get_invariants(struct tg3 *tp)
12886 static struct pci_device_id write_reorder_chipsets[] = {
12887 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12888 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12889 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12890 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12891 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12892 PCI_DEVICE_ID_VIA_8385_0) },
12896 u32 pci_state_reg, grc_misc_cfg;
12901 /* Force memory write invalidate off. If we leave it on,
12902 * then on 5700_BX chips we have to enable a workaround.
12903 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12904 * to match the cacheline size. The Broadcom driver has this
12905 * workaround but turns MWI off all the time, so it never uses
12906 * it. This seems to suggest that the workaround is insufficient.
12908 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12909 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12910 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12912 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12913 * has the register indirect write enable bit set before
12914 * we try to access any of the MMIO registers. It is also
12915 * critical that the PCI-X hw workaround situation is decided
12916 * before that as well.
12918 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12921 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12922 MISC_HOST_CTRL_CHIPREV_SHIFT);
12923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12924 u32 prod_id_asic_rev;
12926 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12927 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12928 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
12929 pci_read_config_dword(tp->pdev,
12930 TG3PCI_GEN2_PRODID_ASICREV,
12931 &prod_id_asic_rev);
12932 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
12933 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
12934 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
12935 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
12936 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
12937 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12938 pci_read_config_dword(tp->pdev,
12939 TG3PCI_GEN15_PRODID_ASICREV,
12940 &prod_id_asic_rev);
12942 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12943 &prod_id_asic_rev);
12945 tp->pci_chip_rev_id = prod_id_asic_rev;
12948 /* Wrong chip ID in 5752 A0. This code can be removed later
12949 * as A0 is not in production.
12951 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12952 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12954 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12955 * we need to disable memory and use config. cycles
12956 * only to access all registers. The 5702/03 chips
12957 * can mistakenly decode the special cycles from the
12958 * ICH chipsets as memory write cycles, causing corruption
12959 * of register and memory space. Only certain ICH bridges
12960 * will drive special cycles with non-zero data during the
12961 * address phase which can fall within the 5703's address
12962 * range. This is not an ICH bug as the PCI spec allows
12963 * non-zero address during special cycles. However, only
12964 * these ICH bridges are known to drive non-zero addresses
12965 * during special cycles.
12967 * Since special cycles do not cross PCI bridges, we only
12968 * enable this workaround if the 5703 is on the secondary
12969 * bus of these ICH bridges.
12971 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12972 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12973 static struct tg3_dev_id {
12977 } ich_chipsets[] = {
12978 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12980 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12982 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12984 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12988 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12989 struct pci_dev *bridge = NULL;
12991 while (pci_id->vendor != 0) {
12992 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12998 if (pci_id->rev != PCI_ANY_ID) {
12999 if (bridge->revision > pci_id->rev)
13002 if (bridge->subordinate &&
13003 (bridge->subordinate->number ==
13004 tp->pdev->bus->number)) {
13006 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
13007 pci_dev_put(bridge);
13013 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13014 static struct tg3_dev_id {
13017 } bridge_chipsets[] = {
13018 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13019 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13022 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13023 struct pci_dev *bridge = NULL;
13025 while (pci_id->vendor != 0) {
13026 bridge = pci_get_device(pci_id->vendor,
13033 if (bridge->subordinate &&
13034 (bridge->subordinate->number <=
13035 tp->pdev->bus->number) &&
13036 (bridge->subordinate->subordinate >=
13037 tp->pdev->bus->number)) {
13038 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
13039 pci_dev_put(bridge);
13045 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13046 * DMA addresses > 40-bit. This bridge may have additional
13047 * 57xx devices behind it, in some 4-port NIC designs for example.
13048 * Any tg3 device found behind the bridge will also need the 40-bit
13051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13053 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13054 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13055 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13058 struct pci_dev *bridge = NULL;
13061 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13062 PCI_DEVICE_ID_SERVERWORKS_EPB,
13064 if (bridge && bridge->subordinate &&
13065 (bridge->subordinate->number <=
13066 tp->pdev->bus->number) &&
13067 (bridge->subordinate->subordinate >=
13068 tp->pdev->bus->number)) {
13069 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13070 pci_dev_put(bridge);
13076 /* Initialize misc host control in PCI block. */
13077 tp->misc_host_ctrl |= (misc_ctrl_reg &
13078 MISC_HOST_CTRL_CHIPREV);
13079 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13080 tp->misc_host_ctrl);
13082 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13085 tp->pdev_peer = tg3_find_peer(tp);
13087 /* Intentionally exclude ASIC_REV_5906 */
13088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13089 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13095 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13096 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13099 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13101 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13102 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13103 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13105 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13106 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13107 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13109 /* 5700 B0 chips do not support checksumming correctly due
13110 * to hardware bugs.
13112 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
13113 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
13115 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13116 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13117 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13118 tp->dev->features |= NETIF_F_IPV6_CSUM;
13121 /* Determine TSO capabilities */
13122 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13123 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13124 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13125 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13127 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13128 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13129 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13131 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13132 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13133 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13134 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13135 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13136 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13138 tp->fw_needed = FIRMWARE_TG3TSO5;
13140 tp->fw_needed = FIRMWARE_TG3TSO;
13145 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13146 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13147 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13148 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13149 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13150 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13151 tp->pdev_peer == tp->pdev))
13152 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13154 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13155 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13156 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13161 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13162 tp->irq_max = TG3_IRQ_MAX_VECS;
13166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13167 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13168 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13169 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13170 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13171 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13176 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13178 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13179 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13180 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13181 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13183 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13186 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13187 if (tp->pcie_cap != 0) {
13190 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13192 pcie_set_readrq(tp->pdev, 4096);
13194 pci_read_config_word(tp->pdev,
13195 tp->pcie_cap + PCI_EXP_LNKCTL,
13197 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13198 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13199 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13200 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13201 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13202 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13203 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13204 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13205 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13206 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13208 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13209 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13210 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13211 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13212 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13213 if (!tp->pcix_cap) {
13214 printk(KERN_ERR PFX "Cannot find PCI-X "
13215 "capability, aborting.\n");
13219 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13220 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13223 /* If we have an AMD 762 or VIA K8T800 chipset, write
13224 * reordering to the mailbox registers done by the host
13225 * controller can cause major troubles. We read back from
13226 * every mailbox register write to force the writes to be
13227 * posted to the chip in order.
13229 if (pci_dev_present(write_reorder_chipsets) &&
13230 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13231 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13233 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13234 &tp->pci_cacheline_sz);
13235 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13236 &tp->pci_lat_timer);
13237 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13238 tp->pci_lat_timer < 64) {
13239 tp->pci_lat_timer = 64;
13240 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13241 tp->pci_lat_timer);
13244 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13245 /* 5700 BX chips need to have their TX producer index
13246 * mailboxes written twice to work around a bug.
13248 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13250 /* If we are in PCI-X mode, enable register write workaround.
13252 * The workaround is to use indirect register accesses
13253 * for all chip writes not to mailbox registers.
13255 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13258 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13260 /* The chip can have its power management PCI config
13261 * space registers clobbered due to this bug.
13262 * So explicitly force the chip into D0 here.
13264 pci_read_config_dword(tp->pdev,
13265 tp->pm_cap + PCI_PM_CTRL,
13267 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13268 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13269 pci_write_config_dword(tp->pdev,
13270 tp->pm_cap + PCI_PM_CTRL,
13273 /* Also, force SERR#/PERR# in PCI command. */
13274 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13275 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13276 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13280 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13281 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13282 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13283 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13285 /* Chip-specific fixup from Broadcom driver */
13286 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13287 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13288 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13289 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13290 }
13292 /* Default fast path register access methods */
13293 tp->read32 = tg3_read32;
13294 tp->write32 = tg3_write32;
13295 tp->read32_mbox = tg3_read32;
13296 tp->write32_mbox = tg3_write32;
13297 tp->write32_tx_mbox = tg3_write32;
13298 tp->write32_rx_mbox = tg3_write32;
13300 /* Various workaround register access methods */
13301 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13302 tp->write32 = tg3_write_indirect_reg32;
13303 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13304 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13305 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13306 /*
13307 * Back-to-back register writes can cause problems on these
13308 * chips; the workaround is to read back all register writes
13309 * except those to mailbox registers.
13310 *
13311 * See tg3_write_indirect_reg32().
13312 */
13313 tp->write32 = tg3_write_flush_reg32;
13314 }
13316 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13317 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13318 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13319 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13320 tp->write32_rx_mbox = tg3_write_flush_reg32;
13321 }
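/* For reference, a simplified sketch of what tg3_write32_tx_mbox()
 * does once these flags are honored (see its definition earlier in
 * this file for the authoritative version):
 *
 *	writel(val, mbox);
 *	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
 *		writel(val, mbox);	<-- 5700 BX double write
 *	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
 *		readl(mbox);		<-- flush for reordering chipsets
 */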
13323 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13324 tp->read32 = tg3_read_indirect_reg32;
13325 tp->write32 = tg3_write_indirect_reg32;
13326 tp->read32_mbox = tg3_read_indirect_mbox;
13327 tp->write32_mbox = tg3_write_indirect_mbox;
13328 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13329 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13331 iounmap(tp->regs);
13332 tp->regs = NULL;
13334 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13335 pci_cmd &= ~PCI_COMMAND_MEMORY;
13336 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13337 }
13338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13339 tp->read32_mbox = tg3_read32_mbox_5906;
13340 tp->write32_mbox = tg3_write32_mbox_5906;
13341 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13342 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13343 }
13345 if (tp->write32 == tg3_write_indirect_reg32 ||
13346 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13347 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13349 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
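/* TG3_FLAG_SRAM_USE_CONFIG routes tg3_read_mem()/tg3_write_mem()
 * through PCI configuration space rather than the mapped BAR. The
 * same window pattern appears verbatim in tg3_do_test_dma() below:
 *
 *	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 *	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 *	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
 */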
13351 /* Get eeprom hw config before calling tg3_set_power_state().
13352 * In particular, the TG3_FLG2_IS_NIC flag must be
13353 * determined before calling tg3_set_power_state() so that
13354 * we know whether or not to switch out of Vaux power.
13355 * When the flag is set, it means that GPIO1 is used for eeprom
13356 * write protect and also implies that it is a LOM where GPIOs
13357 * are not used to switch power.
13358 */
13359 tg3_get_eeprom_hw_cfg(tp);
13361 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13362 /* Allow reads and writes to the
13363 * APE register and memory space.
13364 */
13365 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13366 PCISTATE_ALLOW_APE_SHMEM_WR;
13367 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13368 pci_state_reg);
13371 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13372 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13373 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13374 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13375 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13377 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13379 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
13380 * GPIO1 driven high will bring 5700's external PHY out of reset.
13381 * It is also used as eeprom write protect on LOMs.
13382 */
13383 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13384 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13385 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13386 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13387 GRC_LCLCTRL_GPIO_OUTPUT1);
13388 /* Unused GPIO3 must be driven as output on 5752 because there
13389 * are no pull-up resistors on unused GPIO pins.
13390 */
13391 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13392 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13396 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13397 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13399 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13400 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13401 /* Turn off the debug UART. */
13402 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13403 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13404 /* Keep VMain power. */
13405 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13406 GRC_LCLCTRL_GPIO_OUTPUT0;
13407 }
13409 /* Force the chip into D0. */
13410 err = tg3_set_power_state(tp, PCI_D0);
13411 if (err) {
13412 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
13413 pci_name(tp->pdev));
13414 return err;
13415 }
13417 /* Derive initial jumbo mode from MTU assigned in
13418 * ether_setup() via the alloc_etherdev() call.
13419 */
13420 if (tp->dev->mtu > ETH_DATA_LEN &&
13421 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13422 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13424 /* Determine WakeOnLan speed to use. */
13425 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13426 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13427 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13428 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13429 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13430 } else {
13431 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13432 }
13434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13435 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
13437 /* A few boards don't want Ethernet@WireSpeed phy feature */
13438 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13439 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13440 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13441 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13442 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
13443 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
13444 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
13446 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13447 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13448 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
13449 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13450 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
13452 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13453 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
13454 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13455 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13456 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
13457 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
13458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13459 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13460 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13461 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13462 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13463 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13464 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
13465 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13466 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
13467 } else
13468 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
13469 }
13471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13472 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13473 tp->phy_otp = tg3_read_otp_phycfg(tp);
13474 if (tp->phy_otp == 0)
13475 tp->phy_otp = TG3_OTP_DEFAULT;
13476 }
13478 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13479 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13480 else
13481 tp->mi_mode = MAC_MI_MODE_BASE;
13483 tp->coalesce_mode = 0;
13484 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13485 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13486 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13490 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13492 err = tg3_mdio_init(tp);
13493 if (err)
13494 return err;
13496 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
13497 (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 ||
13498 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
13501 /* Initialize data/descriptor byte/word swapping. */
13502 val = tr32(GRC_MODE);
13503 val &= GRC_MODE_HOST_STACKUP;
13504 tw32(GRC_MODE, val | tp->grc_mode);
13506 tg3_switch_clocks(tp);
13508 /* Clear this out for sanity. */
13509 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
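/* A stale window base would silently alias subsequent SRAM accesses
 * to the wrong NIC-internal addresses, so it is zeroed up front.
 */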
13511 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13513 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13514 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13515 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13517 if (chiprevid == CHIPREV_ID_5701_A0 ||
13518 chiprevid == CHIPREV_ID_5701_B0 ||
13519 chiprevid == CHIPREV_ID_5701_B2 ||
13520 chiprevid == CHIPREV_ID_5701_B5) {
13521 void __iomem *sram_base;
13523 /* Write some dummy words into the SRAM status block
13524 * area and see if they read back correctly. If the return
13525 * value is bad, force-enable the PCIX workaround.
13526 */
13527 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13529 writel(0x00000000, sram_base);
13530 writel(0x00000000, sram_base + 4);
13531 writel(0xffffffff, sram_base + 4);
13532 if (readl(sram_base) != 0x00000000)
13533 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
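/* The probe above works because word 0 and word 1 should be distinct
 * after the three writes (0x00000000 vs 0xffffffff). If the chip is
 * aliasing target-mode writes, the 0xffffffff write to word 1 bleeds
 * into word 0, the read-back comes up nonzero, and the indirect
 * register workaround is enabled.
 */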
13538 tg3_nvram_init(tp);
13540 grc_misc_cfg = tr32(GRC_MISC_CFG);
13541 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13543 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13544 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13545 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13546 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13548 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13549 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13550 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13551 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13552 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13553 HOSTCC_MODE_CLRTICK_TXBD);
13555 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13556 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13557 tp->misc_host_ctrl);
13558 }
13560 /* Preserve the APE MAC_MODE bits */
13561 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13562 tp->mac_mode = tr32(MAC_MODE) |
13563 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13564 else
13565 tp->mac_mode = TG3_DEF_MAC_MODE;
13567 /* these are limited to 10/100 only */
13568 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13569 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13570 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13571 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13572 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13573 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13574 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13575 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13576 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13577 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13578 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13579 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13580 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13581 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13583 err = tg3_phy_probe(tp);
13584 if (err) {
13585 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
13586 pci_name(tp->pdev), err);
13587 /* ... but do not return immediately ... */
13588 tg3_mdio_fini(tp);
13589 }
13591 tg3_read_partno(tp);
13592 tg3_read_fw_ver(tp);
13594 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
13595 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13596 } else {
13597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13598 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
13599 else
13600 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13601 }
13603 /* 5700 {AX,BX} chips have a broken status block link
13604 * change bit implementation, so we must use the
13605 * status register in those cases.
13606 */
13607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13608 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13609 else
13610 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13612 /* The led_ctrl is set during tg3_phy_probe; here we may
13613 * have to force the link status polling mechanism based
13614 * upon subsystem IDs.
13615 */
13616 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13617 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13618 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
13619 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
13620 TG3_FLAG_USE_LINKCHG_REG);
13621 }
13623 /* For all SERDES we poll the MAC status register. */
13624 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
13625 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13626 else
13627 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13629 tp->rx_offset = NET_IP_ALIGN;
13630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13631 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
13632 tp->rx_offset = 0;
13634 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13636 /* Increment the rx prod index on the rx std ring by at most
13637 * 8 for these chips to work around a hw erratum.
13638 */
13639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13640 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13642 tp->rx_std_max_post = 8;
13644 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13645 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13646 PCIE_PWR_MGMT_L1_THRESH_MSK;
13648 return err;
13649 }
13651 #ifdef CONFIG_SPARC
13652 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13654 struct net_device *dev = tp->dev;
13655 struct pci_dev *pdev = tp->pdev;
13656 struct device_node *dp = pci_device_to_OF_node(pdev);
13657 const unsigned char *addr;
13658 int len;
13660 addr = of_get_property(dp, "local-mac-address", &len);
13661 if (addr && len == 6) {
13662 memcpy(dev->dev_addr, addr, 6);
13663 memcpy(dev->perm_addr, dev->dev_addr, 6);
13664 return 0;
13665 }
13666 return -ENODEV;
13667 }
13669 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13671 struct net_device *dev = tp->dev;
13673 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13674 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13675 return 0;
13676 }
13677 #endif
13679 static int __devinit tg3_get_device_address(struct tg3 *tp)
13681 struct net_device *dev = tp->dev;
13682 u32 hi, lo, mac_offset;
13683 int addr_ok = 0;
13685 #ifdef CONFIG_SPARC
13686 if (!tg3_get_macaddr_sparc(tp))
13687 return 0;
13688 #endif
13690 mac_offset = 0x7c;
13691 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13692 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13693 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13694 mac_offset = 0xcc;
13695 if (tg3_nvram_lock(tp))
13696 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13697 else
13698 tg3_nvram_unlock(tp);
13699 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13700 if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
13701 mac_offset = 0xcc;
13702 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13703 mac_offset = 0x10;
13705 /* First try to get it from MAC address mailbox. */
13706 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
13707 if ((hi >> 16) == 0x484b) {
13708 dev->dev_addr[0] = (hi >> 8) & 0xff;
13709 dev->dev_addr[1] = (hi >> 0) & 0xff;
13711 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13712 dev->dev_addr[2] = (lo >> 24) & 0xff;
13713 dev->dev_addr[3] = (lo >> 16) & 0xff;
13714 dev->dev_addr[4] = (lo >> 8) & 0xff;
13715 dev->dev_addr[5] = (lo >> 0) & 0xff;
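/* The 0x484b checked above is ASCII "HK", evidently a bootcode magic
 * marking the mailbox contents as a valid address: the high word
 * carries bytes 0-1 and the low word bytes 2-5, most significant
 * byte first.
 */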
13717 /* Some old bootcode may report a 0 MAC address in SRAM */
13718 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13719 }
13720 if (!addr_ok) {
13721 /* Next, try NVRAM. */
13722 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13723 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13724 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13725 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13726 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13727 }
13728 /* Finally just fetch it out of the MAC control regs. */
13729 else {
13730 hi = tr32(MAC_ADDR_0_HIGH);
13731 lo = tr32(MAC_ADDR_0_LOW);
13733 dev->dev_addr[5] = lo & 0xff;
13734 dev->dev_addr[4] = (lo >> 8) & 0xff;
13735 dev->dev_addr[3] = (lo >> 16) & 0xff;
13736 dev->dev_addr[2] = (lo >> 24) & 0xff;
13737 dev->dev_addr[1] = hi & 0xff;
13738 dev->dev_addr[0] = (hi >> 8) & 0xff;
13739 }
13740 }
13742 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13743 #ifdef CONFIG_SPARC
13744 if (!tg3_get_default_macaddr_sparc(tp))
13745 return 0;
13746 #endif
13747 return -EINVAL;
13748 }
13749 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13750 return 0;
13751 }
13753 #define BOUNDARY_SINGLE_CACHELINE 1
13754 #define BOUNDARY_MULTI_CACHELINE 2
13756 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13758 int cacheline_size;
13759 u8 byte;
13760 int goal;
13762 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13763 if (byte == 0)
13764 cacheline_size = 1024;
13765 else
13766 cacheline_size = (int) byte * 4;
13768 /* On 5703 and later chips, the boundary bits have no
13769 * effect.
13770 */
13771 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13772 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13773 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13774 goto out;
13776 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13777 goal = BOUNDARY_MULTI_CACHELINE;
13778 #else
13779 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13780 goal = BOUNDARY_SINGLE_CACHELINE;
13781 #else
13782 goal = 0;
13783 #endif
13784 #endif
13786 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13787 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13788 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13789 goto out;
13790 }
13792 if (!goal)
13793 goto out;
13795 /* PCI controllers on most RISC systems tend to disconnect
13796 * when a device tries to burst across a cache-line boundary.
13797 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13798 *
13799 * Unfortunately, for PCI-E there are only limited
13800 * write-side controls for this, and thus for reads
13801 * we will still get the disconnects. We'll also waste
13802 * these PCI cycles for both read and write for chips
13803 * other than 5700 and 5701 which do not implement the
13804 * boundary bits.
13805 */
13806 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13807 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13808 switch (cacheline_size) {
13813 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13814 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13815 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13817 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13818 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13823 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13824 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13828 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13829 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13832 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13833 switch (cacheline_size) {
13837 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13838 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13839 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13845 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13846 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13850 switch (cacheline_size) {
13852 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13853 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13854 DMA_RWCTRL_WRITE_BNDRY_16);
13859 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13860 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13861 DMA_RWCTRL_WRITE_BNDRY_32);
13866 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13867 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13868 DMA_RWCTRL_WRITE_BNDRY_64);
13873 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13874 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13875 DMA_RWCTRL_WRITE_BNDRY_128);
13880 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13881 DMA_RWCTRL_WRITE_BNDRY_256);
13884 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13885 DMA_RWCTRL_WRITE_BNDRY_512);
13889 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13890 DMA_RWCTRL_WRITE_BNDRY_1024);
13891 break;
13892 }
13893 }
13895 out:
13896 return val;
13897 }
13899 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13901 struct tg3_internal_buffer_desc test_desc;
13902 u32 sram_dma_descs;
13903 int i, ret;
13905 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13907 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13908 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13909 tw32(RDMAC_STATUS, 0);
13910 tw32(WDMAC_STATUS, 0);
13912 tw32(BUFMGR_MODE, 0);
13913 tw32(FTQ_RESET, 0);
13915 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13916 test_desc.addr_lo = buf_dma & 0xffffffff;
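/* dma_addr_t may be only 32 bits wide on some configurations; the
 * u64 cast above keeps the 32-bit right shift well defined either way.
 */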
13917 test_desc.nic_mbuf = 0x00002100;
13918 test_desc.len = size;
13920 /*
13921 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13922 * the *second* time the tg3 driver was getting loaded after an
13923 * initial scan.
13924 *
13925 * Broadcom tells me:
13926 * ...the DMA engine is connected to the GRC block and a DMA
13927 * reset may affect the GRC block in some unpredictable way...
13928 * The behavior of resets to individual blocks has not been tested.
13929 *
13930 * Broadcom noted the GRC reset will also reset all sub-components.
13931 */
13932 if (to_device) {
13933 test_desc.cqid_sqid = (13 << 8) | 2;
13935 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13936 udelay(40);
13937 } else {
13938 test_desc.cqid_sqid = (16 << 8) | 7;
13940 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13941 udelay(40);
13942 }
13943 test_desc.flags = 0x00000005;
13945 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13946 u32 val;
13948 val = *(((u32 *)&test_desc) + i);
13949 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13950 sram_dma_descs + (i * sizeof(u32)));
13951 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13952 }
13953 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13955 if (to_device)
13956 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13957 else
13958 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13960 ret = -ENODEV;
13962 for (i = 0; i < 40; i++) {
13963 u32 val;
13965 if (to_device)
13966 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13967 else
13968 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13969 if ((val & 0xffff) == sram_dma_descs) {
13970 ret = 0;
13971 break;
13972 }
13974 udelay(100);
13975 }
13977 return ret;
13978 }
13980 #define TEST_BUFFER_SIZE 0x2000
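/* tg3_test_dma() below drives tg3_do_test_dma() in a loop: fill a
 * coherent host buffer with a counting pattern, DMA it into NIC SRAM
 * at offset 0x2100 (to_device == 1), DMA it back (to_device == 0) and
 * compare. A mismatch on read-back drops the write boundary to 16
 * bytes to dodge the 5700/5701 write DMA bug handled further down.
 */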
13982 static int __devinit tg3_test_dma(struct tg3 *tp)
13984 dma_addr_t buf_dma;
13985 u32 *buf, saved_dma_rwctrl;
13986 int ret = 0;
13988 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13989 if (!buf) {
13990 ret = -ENOMEM;
13991 goto out_nofree;
13992 }
13994 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13995 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13997 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13999 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14001 goto out;
14003 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14004 /* DMA read watermark not used on PCIE */
14005 tp->dma_rwctrl |= 0x00180000;
14006 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
14007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14009 tp->dma_rwctrl |= 0x003f0000;
14010 else
14011 tp->dma_rwctrl |= 0x003f000f;
14012 } else {
14013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14015 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14016 u32 read_water = 0x7;
14018 /* If the 5704 is behind the EPB bridge, we can
14019 * do the less restrictive ONE_DMA workaround for
14020 * better performance.
14022 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
14023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14024 tp->dma_rwctrl |= 0x8000;
14025 else if (ccval == 0x6 || ccval == 0x7)
14026 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14029 read_water = 4;
14030 /* Set bit 23 to enable PCIX hw bug fix */
14031 tp->dma_rwctrl |=
14032 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14033 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14034 0x1;
14035 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14036 /* 5780 always in PCIX mode */
14037 tp->dma_rwctrl |= 0x00144000;
14038 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14039 /* 5714 always in PCIX mode */
14040 tp->dma_rwctrl |= 0x00148000;
14041 } else {
14042 tp->dma_rwctrl |= 0x001b000f;
14043 }
14044 }
14046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14048 tp->dma_rwctrl &= 0xfffffff0;
14050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14052 /* Remove this if it causes problems for some boards. */
14053 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14055 /* On 5700/5701 chips, we need to set this bit.
14056 * Otherwise the chip will issue cacheline transactions
14057 * to streamable DMA memory with not all the byte
14058 * enables turned on. This is an error on several
14059 * RISC PCI controllers, in particular sparc64.
14061 * On 5703/5704 chips, this bit has been reassigned
14062 * a different meaning. In particular, it is used
14063 * on those chips to enable a PCI-X workaround.
14064 */
14065 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14066 }
14068 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14070 #if 0
14071 /* Unneeded, already done by tg3_get_invariants. */
14072 tg3_switch_clocks(tp);
14073 #endif
14075 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14076 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14077 goto out;
14079 /* It is best to perform DMA test with maximum write burst size
14080 * to expose the 5700/5701 write DMA bug.
14081 */
14082 saved_dma_rwctrl = tp->dma_rwctrl;
14083 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14084 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14086 while (1) {
14087 u32 *p = buf, i;
14089 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14090 p[i] = i;
14092 /* Send the buffer to the chip. */
14093 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14094 if (ret) {
14095 printk(KERN_ERR "tg3_test_dma() write of test buffer failed, err %d\n", ret);
14096 break;
14097 }
14099 #if 0
14100 /* Validate that the data reached card RAM correctly. */
14101 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14102 u32 val;
14103 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14104 if (le32_to_cpu(val) != p[i]) {
14105 printk(KERN_ERR "tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", le32_to_cpu(val), i);
14106 /* ret = -ENODEV here? */
14107 }
14108 p[i] = 0;
14109 }
14110 #endif
14111 /* Now read it back. */
14112 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14113 if (ret) {
14114 printk(KERN_ERR "tg3_test_dma() read back of test buffer failed, err %d\n", ret);
14115 break;
14116 }
14120 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14121 if (p[i] == i)
14122 continue;
14124 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14125 DMA_RWCTRL_WRITE_BNDRY_16) {
14126 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14127 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14128 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14129 break;
14130 } else {
14131 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
14132 ret = -ENODEV;
14133 goto out;
14134 }
14135 }
14137 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14138 /* Success. */
14139 ret = 0;
14140 break;
14141 }
14142 }
14143 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14144 DMA_RWCTRL_WRITE_BNDRY_16) {
14145 static struct pci_device_id dma_wait_state_chipsets[] = {
14146 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
14147 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14148 { },
14149 };
14151 /* DMA test passed without adjusting DMA boundary,
14152 * now look for chipsets that are known to expose the
14153 * DMA bug without failing the test.
14154 */
14155 if (pci_dev_present(dma_wait_state_chipsets)) {
14156 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14157 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14158 } else {
14160 /* Safe to use the calculated DMA boundary. */
14161 tp->dma_rwctrl = saved_dma_rwctrl;
14162 }
14163 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14164 }
14166 out:
14167 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
14168 out_nofree:
14169 return ret;
14170 }
14172 static void __devinit tg3_init_link_config(struct tg3 *tp)
14174 tp->link_config.advertising =
14175 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14176 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14177 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14178 ADVERTISED_Autoneg | ADVERTISED_MII);
14179 tp->link_config.speed = SPEED_INVALID;
14180 tp->link_config.duplex = DUPLEX_INVALID;
14181 tp->link_config.autoneg = AUTONEG_ENABLE;
14182 tp->link_config.active_speed = SPEED_INVALID;
14183 tp->link_config.active_duplex = DUPLEX_INVALID;
14184 tp->link_config.phy_is_low_power = 0;
14185 tp->link_config.orig_speed = SPEED_INVALID;
14186 tp->link_config.orig_duplex = DUPLEX_INVALID;
14187 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14190 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14194 tp->bufmgr_config.mbuf_read_dma_low_water =
14195 DEFAULT_MB_RDMA_LOW_WATER_5705;
14196 tp->bufmgr_config.mbuf_mac_rx_low_water =
14197 DEFAULT_MB_MACRX_LOW_WATER_57765;
14198 tp->bufmgr_config.mbuf_high_water =
14199 DEFAULT_MB_HIGH_WATER_57765;
14201 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14202 DEFAULT_MB_RDMA_LOW_WATER_5705;
14203 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14204 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14205 tp->bufmgr_config.mbuf_high_water_jumbo =
14206 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14207 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14208 tp->bufmgr_config.mbuf_read_dma_low_water =
14209 DEFAULT_MB_RDMA_LOW_WATER_5705;
14210 tp->bufmgr_config.mbuf_mac_rx_low_water =
14211 DEFAULT_MB_MACRX_LOW_WATER_5705;
14212 tp->bufmgr_config.mbuf_high_water =
14213 DEFAULT_MB_HIGH_WATER_5705;
14214 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14215 tp->bufmgr_config.mbuf_mac_rx_low_water =
14216 DEFAULT_MB_MACRX_LOW_WATER_5906;
14217 tp->bufmgr_config.mbuf_high_water =
14218 DEFAULT_MB_HIGH_WATER_5906;
14219 }
14221 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14222 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14223 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14224 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14225 tp->bufmgr_config.mbuf_high_water_jumbo =
14226 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14228 tp->bufmgr_config.mbuf_read_dma_low_water =
14229 DEFAULT_MB_RDMA_LOW_WATER;
14230 tp->bufmgr_config.mbuf_mac_rx_low_water =
14231 DEFAULT_MB_MACRX_LOW_WATER;
14232 tp->bufmgr_config.mbuf_high_water =
14233 DEFAULT_MB_HIGH_WATER;
14235 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14236 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14237 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14238 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14239 tp->bufmgr_config.mbuf_high_water_jumbo =
14240 DEFAULT_MB_HIGH_WATER_JUMBO;
14241 }
14243 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14244 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14245 }
14247 static char * __devinit tg3_phy_string(struct tg3 *tp)
14249 switch (tp->phy_id & PHY_ID_MASK) {
14250 case PHY_ID_BCM5400: return "5400";
14251 case PHY_ID_BCM5401: return "5401";
14252 case PHY_ID_BCM5411: return "5411";
14253 case PHY_ID_BCM5701: return "5701";
14254 case PHY_ID_BCM5703: return "5703";
14255 case PHY_ID_BCM5704: return "5704";
14256 case PHY_ID_BCM5705: return "5705";
14257 case PHY_ID_BCM5750: return "5750";
14258 case PHY_ID_BCM5752: return "5752";
14259 case PHY_ID_BCM5714: return "5714";
14260 case PHY_ID_BCM5780: return "5780";
14261 case PHY_ID_BCM5755: return "5755";
14262 case PHY_ID_BCM5787: return "5787";
14263 case PHY_ID_BCM5784: return "5784";
14264 case PHY_ID_BCM5756: return "5722/5756";
14265 case PHY_ID_BCM5906: return "5906";
14266 case PHY_ID_BCM5761: return "5761";
14267 case PHY_ID_BCM5718C: return "5718C";
14268 case PHY_ID_BCM5718S: return "5718S";
14269 case PHY_ID_BCM57765: return "57765";
14270 case PHY_ID_BCM8002: return "8002/serdes";
14271 case 0: return "serdes";
14272 default: return "unknown";
14273 }
14274 }
14276 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14278 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14279 strcpy(str, "PCI Express");
14280 return str;
14281 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14282 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14284 strcpy(str, "PCIX:");
14286 if ((clock_ctrl == 7) ||
14287 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14288 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14289 strcat(str, "133MHz");
14290 else if (clock_ctrl == 0)
14291 strcat(str, "33MHz");
14292 else if (clock_ctrl == 2)
14293 strcat(str, "50MHz");
14294 else if (clock_ctrl == 4)
14295 strcat(str, "66MHz");
14296 else if (clock_ctrl == 6)
14297 strcat(str, "100MHz");
14298 } else {
14299 strcpy(str, "PCI:");
14300 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14301 strcat(str, "66MHz");
14302 else
14303 strcat(str, "33MHz");
14304 }
14305 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14306 strcat(str, ":32-bit");
14307 else
14308 strcat(str, ":64-bit");
14309 return str;
14310 }
14312 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14314 struct pci_dev *peer;
14315 unsigned int func, devnr = tp->pdev->devfn & ~7;
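/* devfn packs PCI device and function numbers as (slot << 3) | func,
 * so masking with ~7 yields function 0 of this slot; the loop below
 * then probes all eight functions looking for the 5704's other port.
 */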
14317 for (func = 0; func < 8; func++) {
14318 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14319 if (peer && peer != tp->pdev)
14320 break;
14321 pci_dev_put(peer);
14322 }
14323 /* 5704 can be configured in single-port mode, set peer to
14324 * tp->pdev in that case.
14325 */
14326 if (!peer) {
14327 peer = tp->pdev;
14328 return peer;
14329 }
14331 /*
14332 * We don't need to keep the refcount elevated; there's no way
14333 * to remove one half of this device without removing the other.
14334 */
14335 pci_dev_put(peer);
14337 return peer;
14338 }
14340 static void __devinit tg3_init_coal(struct tg3 *tp)
14342 struct ethtool_coalesce *ec = &tp->coal;
14344 memset(ec, 0, sizeof(*ec));
14345 ec->cmd = ETHTOOL_GCOALESCE;
14346 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14347 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14348 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14349 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14350 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14351 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14352 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14353 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14354 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14356 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14357 HOSTCC_MODE_CLRTICK_TXBD)) {
14358 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14359 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14360 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14361 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14364 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14365 ec->rx_coalesce_usecs_irq = 0;
14366 ec->tx_coalesce_usecs_irq = 0;
14367 ec->stats_block_coalesce_usecs = 0;
14368 }
14369 }
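/* These defaults are what userspace sees through the ETHTOOL_GCOALESCE
 * ioctl, e.g. "ethtool -c ethX"; they can be tuned with "ethtool -C",
 * subject to the driver's set_coalesce validation.
 */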
14371 static const struct net_device_ops tg3_netdev_ops = {
14372 .ndo_open = tg3_open,
14373 .ndo_stop = tg3_close,
14374 .ndo_start_xmit = tg3_start_xmit,
14375 .ndo_get_stats = tg3_get_stats,
14376 .ndo_validate_addr = eth_validate_addr,
14377 .ndo_set_multicast_list = tg3_set_rx_mode,
14378 .ndo_set_mac_address = tg3_set_mac_addr,
14379 .ndo_do_ioctl = tg3_ioctl,
14380 .ndo_tx_timeout = tg3_tx_timeout,
14381 .ndo_change_mtu = tg3_change_mtu,
14382 #if TG3_VLAN_TAG_USED
14383 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14384 #endif
14385 #ifdef CONFIG_NET_POLL_CONTROLLER
14386 .ndo_poll_controller = tg3_poll_controller,
14387 #endif
14388 };
14390 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14391 .ndo_open = tg3_open,
14392 .ndo_stop = tg3_close,
14393 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14394 .ndo_get_stats = tg3_get_stats,
14395 .ndo_validate_addr = eth_validate_addr,
14396 .ndo_set_multicast_list = tg3_set_rx_mode,
14397 .ndo_set_mac_address = tg3_set_mac_addr,
14398 .ndo_do_ioctl = tg3_ioctl,
14399 .ndo_tx_timeout = tg3_tx_timeout,
14400 .ndo_change_mtu = tg3_change_mtu,
14401 #if TG3_VLAN_TAG_USED
14402 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14403 #endif
14404 #ifdef CONFIG_NET_POLL_CONTROLLER
14405 .ndo_poll_controller = tg3_poll_controller,
14406 #endif
14407 };
14409 static int __devinit tg3_init_one(struct pci_dev *pdev,
14410 const struct pci_device_id *ent)
14412 static int tg3_version_printed = 0;
14413 struct net_device *dev;
14414 struct tg3 *tp;
14415 int i, err, pm_cap;
14416 u32 sndmbx, rcvmbx, intmbx;
14417 char str[40];
14418 u64 dma_mask, persist_dma_mask;
14420 if (tg3_version_printed++ == 0)
14421 printk(KERN_INFO "%s", version);
14423 err = pci_enable_device(pdev);
14424 if (err) {
14425 printk(KERN_ERR PFX "Cannot enable PCI device, "
14426 "aborting.\n");
14427 return err;
14428 }
14430 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14431 if (err) {
14432 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
14433 "aborting.\n");
14434 goto err_out_disable_pdev;
14435 }
14437 pci_set_master(pdev);
14439 /* Find power-management capability. */
14440 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14441 if (pm_cap == 0) {
14442 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
14443 "aborting.\n");
14444 err = -EIO;
14445 goto err_out_free_res;
14446 }
14448 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14449 if (!dev) {
14450 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
14451 err = -ENOMEM;
14452 goto err_out_free_res;
14453 }
14455 SET_NETDEV_DEV(dev, &pdev->dev);
14457 #if TG3_VLAN_TAG_USED
14458 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14459 #endif
14461 tp = netdev_priv(dev);
14462 tp->pdev = pdev;
14463 tp->dev = dev;
14464 tp->pm_cap = pm_cap;
14465 tp->rx_mode = TG3_DEF_RX_MODE;
14466 tp->tx_mode = TG3_DEF_TX_MODE;
14468 if (tg3_debug > 0)
14469 tp->msg_enable = tg3_debug;
14470 else
14471 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14473 /* The word/byte swap controls here control register access byte
14474 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14475 * register.
14476 */
14477 tp->misc_host_ctrl =
14478 MISC_HOST_CTRL_MASK_PCI_INT |
14479 MISC_HOST_CTRL_WORD_SWAP |
14480 MISC_HOST_CTRL_INDIR_ACCESS |
14481 MISC_HOST_CTRL_PCISTATE_RW;
14483 /* The NONFRM (non-frame) byte/word swap controls take effect
14484 * on descriptor entries, anything which isn't packet data.
14486 * The StrongARM chips on the board (one for tx, one for rx)
14487 * are running in big-endian mode.
14488 */
14489 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14490 GRC_MODE_WSWAP_NONFRM_DATA);
14491 #ifdef __BIG_ENDIAN
14492 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14493 #endif
14494 spin_lock_init(&tp->lock);
14495 spin_lock_init(&tp->indirect_lock);
14496 INIT_WORK(&tp->reset_task, tg3_reset_task);
14498 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14499 if (!tp->regs) {
14500 printk(KERN_ERR PFX "Cannot map device registers, "
14501 "aborting.\n");
14502 err = -ENOMEM;
14503 goto err_out_free_dev;
14504 }
14506 tg3_init_link_config(tp);
14508 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14509 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14511 dev->ethtool_ops = &tg3_ethtool_ops;
14512 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14513 dev->irq = pdev->irq;
14515 err = tg3_get_invariants(tp);
14516 if (err) {
14517 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
14518 "aborting.\n");
14519 goto err_out_iounmap;
14520 }
14522 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14523 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
14524 dev->netdev_ops = &tg3_netdev_ops;
14525 else
14526 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14529 /* The EPB bridge inside 5714, 5715, and 5780 and any
14530 * device behind the EPB cannot support DMA addresses > 40-bit.
14531 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14532 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14533 * do DMA address check in tg3_start_xmit().
14534 */
14535 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14536 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14537 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14538 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14539 #ifdef CONFIG_HIGHMEM
14540 dma_mask = DMA_BIT_MASK(64);
14541 #endif
14542 } else
14543 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
14545 /* Configure DMA attributes. */
14546 if (dma_mask > DMA_BIT_MASK(32)) {
14547 err = pci_set_dma_mask(pdev, dma_mask);
14548 if (err == 0) {
14549 dev->features |= NETIF_F_HIGHDMA;
14550 err = pci_set_consistent_dma_mask(pdev,
14551 persist_dma_mask);
14552 if (err < 0) {
14553 printk(KERN_ERR PFX "Unable to obtain 64 bit "
14554 "DMA for consistent allocations\n");
14555 goto err_out_iounmap;
14556 }
14557 }
14558 }
14559 if (err || dma_mask == DMA_BIT_MASK(32)) {
14560 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14561 if (err) {
14562 printk(KERN_ERR PFX "No usable DMA configuration, "
14563 "aborting.\n");
14564 goto err_out_iounmap;
14565 }
14566 }
14568 tg3_init_bufmgr_config(tp);
14570 /* Selectively allow TSO based on operating conditions */
14571 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14572 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14573 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14574 else {
14575 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14576 tp->fw_needed = NULL;
14577 }
14579 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14580 tp->fw_needed = FIRMWARE_TG3;
14582 /* TSO is on by default on chips that support hardware TSO.
14583 * Firmware TSO on older chips gives lower performance, so it
14584 * is off by default, but can be enabled using ethtool.
14585 */
14586 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14587 (dev->features & NETIF_F_IP_CSUM))
14588 dev->features |= NETIF_F_TSO;
14590 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14591 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14592 if (dev->features & NETIF_F_IPV6_CSUM)
14593 dev->features |= NETIF_F_TSO6;
14594 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14595 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14596 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14597 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14600 dev->features |= NETIF_F_TSO_ECN;
14603 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14604 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14605 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14606 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14607 tp->rx_pending = 63;
14610 err = tg3_get_device_address(tp);
14611 if (err) {
14612 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
14613 "aborting.\n");
14614 goto err_out_iounmap;
14615 }
14617 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14618 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14619 if (!tp->aperegs) {
14620 printk(KERN_ERR PFX "Cannot map APE registers, "
14621 "aborting.\n");
14622 err = -ENOMEM;
14623 goto err_out_iounmap;
14624 }
14626 tg3_ape_lock_init(tp);
14628 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14629 tg3_read_dash_ver(tp);
14630 }
14632 /*
14633 * Reset chip in case UNDI or EFI driver did not shut it down.
14634 * Otherwise the DMA self test will enable WDMAC and we'll see
14635 * (spurious) pending DMA on the PCI bus at that point.
14636 */
14637 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14638 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14639 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14640 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14641 }
14643 err = tg3_test_dma(tp);
14644 if (err) {
14645 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
14646 goto err_out_apeunmap;
14647 }
14649 /* flow control autonegotiation is default behavior */
14650 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14651 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
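/* With TG3_FLAG_PAUSE_AUTONEG set, the TX/RX pause defaults above are
 * what link autonegotiation will advertise; userspace can inspect and
 * override them via "ethtool -a" / "ethtool -A".
 */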
14653 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14654 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14655 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14656 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14657 struct tg3_napi *tnapi = &tp->napi[i];
14659 tnapi->tp = tp;
14660 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14662 tnapi->int_mbox = intmbx;
14663 if (i < 4)
14664 intmbx += 0x8;
14665 else
14666 intmbx += 0x4;
14668 tnapi->consmbox = rcvmbx;
14669 tnapi->prodmbox = sndmbx;
14671 if (i)
14672 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14673 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14674 else
14675 tnapi->coal_now = HOSTCC_MODE_NOW;
14676 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14679 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14680 break;
14682 /*
14683 * If we support MSIX, we'll be using RSS. If we're using
14684 * RSS, the first vector only handles link interrupts and the
14685 * remaining vectors handle rx and tx interrupts. Reuse the
14686 * mailbox values for the next iteration. The values we set up
14687 * above are still useful for the single-vector mode.
14688 */
14702 pci_set_drvdata(pdev, dev);
14704 err = register_netdev(dev);
14705 if (err) {
14706 printk(KERN_ERR PFX "Cannot register net device, "
14707 "aborting.\n");
14708 goto err_out_apeunmap;
14709 }
14711 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14712 dev->name,
14713 tp->board_part_number,
14714 tp->pci_chip_rev_id,
14715 tg3_bus_string(tp, str),
14716 dev->dev_addr);
14718 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14719 struct phy_device *phydev;
14720 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14722 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14723 tp->dev->name, phydev->drv->name,
14724 dev_name(&phydev->dev));
14725 } else
14726 printk(KERN_INFO
14727 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
14728 tp->dev->name, tg3_phy_string(tp),
14729 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14730 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14731 "10/100/1000Base-T")),
14732 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14734 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14735 dev->name,
14736 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14737 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14738 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
14739 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14740 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14741 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14742 dev->name, tp->dma_rwctrl,
14743 (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
14744 (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));
14746 return 0;
14748 err_out_apeunmap:
14749 if (tp->aperegs) {
14750 iounmap(tp->aperegs);
14751 tp->aperegs = NULL;
14752 }
14754 err_out_iounmap:
14755 if (tp->regs) {
14756 iounmap(tp->regs);
14757 tp->regs = NULL;
14758 }
14760 err_out_free_dev:
14761 free_netdev(dev);
14763 err_out_free_res:
14764 pci_release_regions(pdev);
14766 err_out_disable_pdev:
14767 pci_disable_device(pdev);
14768 pci_set_drvdata(pdev, NULL);
14769 return err;
14770 }
14772 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14774 struct net_device *dev = pci_get_drvdata(pdev);
14776 if (dev) {
14777 struct tg3 *tp = netdev_priv(dev);
14779 if (tp->fw)
14780 release_firmware(tp->fw);
14782 flush_scheduled_work();
14784 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14785 tg3_phy_fini(tp);
14786 tg3_mdio_fini(tp);
14787 }
14789 unregister_netdev(dev);
14790 if (tp->aperegs) {
14791 iounmap(tp->aperegs);
14792 tp->aperegs = NULL;
14793 }
14794 if (tp->regs) {
14795 iounmap(tp->regs);
14796 tp->regs = NULL;
14797 }
14798 free_netdev(dev);
14799 pci_release_regions(pdev);
14800 pci_disable_device(pdev);
14801 pci_set_drvdata(pdev, NULL);
14802 }
14803 }
14805 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14807 struct net_device *dev = pci_get_drvdata(pdev);
14808 struct tg3 *tp = netdev_priv(dev);
14809 pci_power_t target_state;
14810 int err;
14812 /* PCI register 4 needs to be saved whether netif_running() or not.
14813 * MSI address and data need to be saved if using MSI and
14816 pci_save_state(pdev);
14818 if (!netif_running(dev))
14819 return 0;
14821 flush_scheduled_work();
14823 tg3_netif_stop(tp);
14825 del_timer_sync(&tp->timer);
14827 tg3_full_lock(tp, 1);
14828 tg3_disable_ints(tp);
14829 tg3_full_unlock(tp);
14831 netif_device_detach(dev);
14833 tg3_full_lock(tp, 0);
14834 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14835 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14836 tg3_full_unlock(tp);
14838 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14840 err = tg3_set_power_state(tp, target_state);
14841 if (err) {
14842 int err2;
14844 tg3_full_lock(tp, 0);
14846 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14847 err2 = tg3_restart_hw(tp, 1);
14848 if (err2)
14849 goto out;
14851 tp->timer.expires = jiffies + tp->timer_offset;
14852 add_timer(&tp->timer);
14854 netif_device_attach(dev);
14855 tg3_netif_start(tp);
14857 out:
14858 tg3_full_unlock(tp);
14860 if (!err2)
14861 tg3_phy_start(tp);
14862 }
14864 return err;
14865 }
14867 static int tg3_resume(struct pci_dev *pdev)
14869 struct net_device *dev = pci_get_drvdata(pdev);
14870 struct tg3 *tp = netdev_priv(dev);
14871 int err;
14873 pci_restore_state(tp->pdev);
14875 if (!netif_running(dev))
14876 return 0;
14878 err = tg3_set_power_state(tp, PCI_D0);
14879 if (err)
14880 return err;
14882 netif_device_attach(dev);
14884 tg3_full_lock(tp, 0);
14886 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14887 err = tg3_restart_hw(tp, 1);
14888 if (err)
14889 goto out;
14891 tp->timer.expires = jiffies + tp->timer_offset;
14892 add_timer(&tp->timer);
14894 tg3_netif_start(tp);
14896 out:
14897 tg3_full_unlock(tp);
14899 if (!err)
14900 tg3_phy_start(tp);
14902 return err;
14903 }
14905 static struct pci_driver tg3_driver = {
14906 .name = DRV_MODULE_NAME,
14907 .id_table = tg3_pci_tbl,
14908 .probe = tg3_init_one,
14909 .remove = __devexit_p(tg3_remove_one),
14910 .suspend = tg3_suspend,
14911 .resume = tg3_resume
14912 };
14914 static int __init tg3_init(void)
14916 return pci_register_driver(&tg3_driver);
14917 }
14919 static void __exit tg3_cleanup(void)
14921 pci_unregister_driver(&tg3_driver);
14922 }
14924 module_init(tg3_init);
14925 module_exit(tg3_cleanup);