/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2009 Broadcom Corporation.
 *
 * Derived from proprietary unpublished source code,
 * Copyright (C) 2000-2003 Broadcom Corporation.
 *
 * Permission is hereby granted for the distribution of this firmware
 * data in hexadecimal or equivalent format, provided this copyright
 * notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <asm/idprom.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.101"
#define DRV_MODULE_RELDATE	"August 28, 2009"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	\

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
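/* NEXT_TX is exactly the '& (foo - 1)' trick described above: since
 * TG3_TX_RING_SIZE is a power of two, masking the incremented index with
 * (size - 1) wraps it around the ring without a hardware modulo.
 */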
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
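/* i.e. a stopped TX queue is only woken again once at least a quarter of
 * its descriptors are free, which gives some hysteresis between the
 * queue-stop and queue-wake paths.
 */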
#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

#define TG3_RSS_MIN_NUM_MSIX_VECS	2

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
	writel(val, tp->regs + off);

static u32 tg3_read32(struct tg3 *tp, u32 off)
	return (readl(tp->regs + off));

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
	writel(val, tp->aperegs + off);

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
	return (readl(tp->aperegs + off));

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
	writel(val, tp->regs + off);
	readl(tp->regs + off);

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);

	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
		tg3_write32(tp, off, val);

	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
	void __iomem *mbox = tp->regs + off;
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
	return (readl(tp->regs + off + GRCMBOX_BASE));

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
	writel(val, tp->regs + off + GRCMBOX_BASE);

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
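/* The tw32()/tr32() and mailbox wrappers above expand through the
 * tp->read32/tp->write32 (and *_mbox) function pointers, which are pointed
 * at one of the accessors in this file (plain MMIO, read-back flushed, or
 * indirect config-space access) when the device is probed, so the rest of
 * the driver uses a single set of macros regardless of which register
 * access workaround is in effect.
 */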
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	spin_unlock_irqrestore(&tp->indirect_lock, flags);

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	spin_unlock_irqrestore(&tp->indirect_lock, flags);

static void tg3_ape_lock_init(struct tg3 *tp)
	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
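
/* tg3_ape_lock() below acquires one of the APE (Application Processing
 * Engine) hardware locks: it writes a request into the per-lock
 * TG3_APE_LOCK_REQ register, polls TG3_APE_LOCK_GRANT until the driver's
 * grant value appears, and revokes the request again if the grant never
 * arrives within the poll budget.
 */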
static int tg3_ape_lock(struct tg3 *tp, int locknum)
	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))

	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))

	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:

	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);

static void tg3_disable_ints(struct tg3 *tp)
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);

static void tg3_enable_ints(struct tg3 *tp)
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		coal_now |= tnapi->coal_now;

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);

	tw32(HOSTCC_MODE, tp->coalesce_mode |
	     HOSTCC_MODE_ENABLE | coal_now);
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)

	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr)

/*
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);

static void tg3_napi_disable(struct tg3 *tp)
	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);

static void tg3_napi_enable(struct tg3 *tp)
	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
static inline void tg3_netif_stop(struct tg3 *tp)
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);

static inline void tg3_netif_start(struct tg3 *tp)
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;

static void tg3_switch_clocks(struct tg3 *tp)
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | (CLOCK_CTRL_ALTCLK),
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);

#define PHY_BUSY_LOOPS	5000
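
/* PHY register access below goes through the MAC's MI (management
 * interface) port: the PHY address, register number, data and command bits
 * are packed into MAC_MI_COM, and MI_COM_BUSY is then polled for up to
 * PHY_BUSY_LOOPS iterations until the frame completes.  MI autopolling is
 * temporarily turned off around the access so it cannot collide with the
 * manual transaction.
 */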
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
			     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));

	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			frame_val = tr32(MAC_MI_COM);

		*val = frame_val & MI_COM_DATA_MASK;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
			     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));

	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			frame_val = tr32(MAC_MI_COM);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);

static int tg3_bmcr_reset(struct tg3 *tp)
	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);

		err = tg3_readphy(tp, MII_BMCR, &phy_control);

		if ((phy_control & BMCR_RESET) == 0) {
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
	struct tg3 *tp = bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)

	if (tg3_readphy(tp, reg, &val))

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
	struct tg3 *tp = bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)

	if (tg3_writephy(tp, reg, val))

static int tg3_mdio_reset(struct mii_bus *bp)

static void tg3_mdio_config_5785(struct tg3 *tp)
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		val = MAC_PHYCFG2_50610_LED_MODES;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);
	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	tw32(MAC_EXT_RGMII_MODE, val);
static void tg3_mdio_start(struct tg3 *tp)
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

static void tg3_mdio_stop(struct tg3 *tp)
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);

static int tg3_mdio_init(struct tg3 *tp)
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))

	i = mdiobus_register(tp->mdio_bus);
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
		mdiobus_free(tp->mdio_bus);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
	case TG3_PHY_ID_BCM50610:
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

static void tg3_mdio_fini(struct tg3 *tp)
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
	unsigned int delay_cnt;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
			     usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
	if (time_remain < 0)

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;
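	/* Each pass of the poll loop below delays on the order of 8 usec,
	 * so the remaining usec budget is divided by 8 (plus one) to get a
	 * bounded loop count.
	 */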
	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	if (!tg3_readphy(tp, MII_BMCR, &reg))
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
static void tg3_link_report(struct tg3 *tp)
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       (tp->link_config.active_speed == SPEED_1000 ?
		       (tp->link_config.active_speed == SPEED_100 ?
		       (tp->link_config.active_duplex == DUPLEX_FULL ?

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		tg3_ump_link_report(tp);
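
/* The two helpers below translate the driver's FLOW_CTRL_TX/FLOW_CTRL_RX
 * request into the standard 802.3 pause advertisement bits: TX+RX maps to
 * the symmetric pause bit, TX-only to the asymmetric pause bit, RX-only to
 * both bits, and neither to no pause advertisement at all.
 */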
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
static void tg3_adjust_link(struct net_device *dev)
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

		tg3_link_report(tp);
static int tg3_phy_init(struct tg3 *tp)
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)

	/* Bring the PHY back to a known state. */

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Asym_Pause);

	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Asym_Pause);

		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

static void tg3_phy_start(struct tg3 *tp)
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;

	phy_start_aneg(phydev);

static void tg3_phy_stop(struct tg3 *tp)
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))

	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);

static void tg3_phy_fini(struct tg3 *tp)
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);

		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

static void tg3_phy_set_wirespeed(struct tg3 *tp)
	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
static void tg3_phy_apply_otp(struct tg3 *tp)
	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
static int tg3_wait_macro_done(struct tg3 *tp)
		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};

	for (chan = 0; chan < 4; chan++) {
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {

		for (i = 0; i < 6; i += 2) {
			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {

			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

static int tg3_phy_reset_chanpat(struct tg3 *tp)
	for (chan = 0; chan < 4; chan++) {
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

		err = tg3_bmcr_reset(tp);

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))

		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);

	err = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);

	err = tg3_bmcr_reset(tp);

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
		tg3_phy_toggle_apd(tp, false);
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
static void tg3_frob_aux_power(struct tg3 *tp)
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
			tp_peer = netdev_priv(dev_peer);

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		if (tp_peer != tp &&
		    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    GRC_LCLCTRL_GPIO_OE1, 100);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
		if (speed != SPEED_10)
	} else if (speed == SPEED_10)

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);

	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);

	/* The PHY should not be powered down on some chips because
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
		tp->nvram_lock_cnt++;

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2232 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2233 u32 offset, u32 *val)
2238 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2241 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2242 EEPROM_ADDR_DEVID_MASK |
2244 tw32(GRC_EEPROM_ADDR,
2246 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2247 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2248 EEPROM_ADDR_ADDR_MASK) |
2249 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2251 for (i = 0; i < 1000; i++) {
2252 tmp = tr32(GRC_EEPROM_ADDR);
2254 if (tmp & EEPROM_ADDR_COMPLETE)
2258 if (!(tmp & EEPROM_ADDR_COMPLETE))
2261 tmp = tr32(GRC_EEPROM_DATA);
2264 * The data will always be opposite the native endian
2265 * format. Perform a blind byteswap to compensate.
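/* Illustrative sketch only (not the driver's own statement): the comment
 * above describes a blind byteswap of the value latched from
 * GRC_EEPROM_DATA before it is handed back to the caller, e.g.:
 *
 *	*val = swab32(tmp);
 *	return 0;
 *
 * swab32() unconditionally reverses the byte order, which is what
 * "blind" means here -- no endianness test is performed.
 */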
2272 #define NVRAM_CMD_TIMEOUT 10000
2274 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2278 tw32(NVRAM_CMD, nvram_cmd);
2279 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2281 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2287 if (i == NVRAM_CMD_TIMEOUT)
2293 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2295 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2296 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2297 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2298 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2299 (tp->nvram_jedecnum == JEDEC_ATMEL))
2301 addr = ((addr / tp->nvram_pagesize) <<
2302 ATMEL_AT45DB0X1B_PAGE_POS) +
2303 (addr % tp->nvram_pagesize);
2308 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2310 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2311 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2312 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2313 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2314 (tp->nvram_jedecnum == JEDEC_ATMEL))
2316 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2317 tp->nvram_pagesize) +
2318 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
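/* Worked example of the AT45DB0x1B address translation above (a sketch
 * that assumes the usual 264-byte page size and an
 * ATMEL_AT45DB0X1B_PAGE_POS of 9):
 *
 *	logical addr 1000: page = 1000 / 264 = 3, offset = 1000 % 264 = 208
 *	physical addr     = (3 << 9) + 208 = 1744
 *
 * tg3_nvram_logical_addr() performs the inverse mapping:
 *
 *	physical addr 1744: page = 1744 >> 9 = 3, offset = 1744 & 511 = 208
 *	logical addr       = 3 * 264 + 208 = 1000
 *
 * The translation is needed because these Atmel parts address flash in
 * power-of-two page slots even though only 264 bytes of each slot are
 * backed by real storage.
 */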
2323 /* NOTE: Data read in from NVRAM is byteswapped according to
2324 * the byteswapping settings for all other register accesses.
2325 * tg3 devices are BE devices, so on a BE machine, the data
2326 * returned will be exactly as it is seen in NVRAM. On a LE
2327 * machine, the 32-bit value will be byteswapped.
2329 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2333 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2334 return tg3_nvram_read_using_eeprom(tp, offset, val);
2336 offset = tg3_nvram_phys_addr(tp, offset);
2338 if (offset > NVRAM_ADDR_MSK)
2341 ret = tg3_nvram_lock(tp);
2345 tg3_enable_nvram_access(tp);
2347 tw32(NVRAM_ADDR, offset);
2348 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2349 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2352 *val = tr32(NVRAM_RDDATA);
2354 tg3_disable_nvram_access(tp);
2356 tg3_nvram_unlock(tp);
2361 /* Ensures NVRAM data is in bytestream format. */
2362 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2365 int res = tg3_nvram_read(tp, offset, &v);
2367 *val = cpu_to_be32(v);
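/* Usage sketch (hypothetical caller, not part of the driver): because
 * tg3_nvram_read_be32() returns the word as __be32, copying it straight
 * into a byte buffer preserves the NVRAM byte order on both LE and BE
 * hosts, e.g. when pulling a string or MAC address out of NVRAM:
 *
 *	__be32 word;
 *	u8 buf[4];
 *
 *	if (tg3_nvram_read_be32(tp, offset, &word) == 0)
 *		memcpy(buf, &word, sizeof(word));
 *
 * buf[] then holds the four bytes exactly as they appear in NVRAM.
 */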
2371 /* tp->lock is held. */
2372 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2374 u32 addr_high, addr_low;
2377 addr_high = ((tp->dev->dev_addr[0] << 8) |
2378 tp->dev->dev_addr[1]);
2379 addr_low = ((tp->dev->dev_addr[2] << 24) |
2380 (tp->dev->dev_addr[3] << 16) |
2381 (tp->dev->dev_addr[4] << 8) |
2382 (tp->dev->dev_addr[5] << 0));
2383 for (i = 0; i < 4; i++) {
2384 if (i == 1 && skip_mac_1)
2386 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2387 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2392 for (i = 0; i < 12; i++) {
2393 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2394 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2398 addr_high = (tp->dev->dev_addr[0] +
2399 tp->dev->dev_addr[1] +
2400 tp->dev->dev_addr[2] +
2401 tp->dev->dev_addr[3] +
2402 tp->dev->dev_addr[4] +
2403 tp->dev->dev_addr[5]) &
2404 TX_BACKOFF_SEED_MASK;
2405 tw32(MAC_TX_BACKOFF_SEED, addr_high);
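/* Worked example of the register packing done in __tg3_set_mac_addr()
 * above (illustrative address only).  For dev_addr = 00:10:18:aa:bb:cc:
 *
 *	addr_high = (0x00 << 8) | 0x10                          = 0x00000010
 *	addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc = 0x18aabbcc
 *
 * i.e. MAC_ADDR_x_HIGH holds the two most significant octets and
 * MAC_ADDR_x_LOW the remaining four, most significant octet first.
 */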
2408 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2411 bool device_should_wake, do_low_power;
2413 /* Make sure register accesses (indirect or otherwise)
2414 * will function correctly.
2416 pci_write_config_dword(tp->pdev,
2417 TG3PCI_MISC_HOST_CTRL,
2418 tp->misc_host_ctrl);
2422 pci_enable_wake(tp->pdev, state, false);
2423 pci_set_power_state(tp->pdev, PCI_D0);
2425 /* Switch out of Vaux if it is a NIC */
2426 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2427 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2437 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2438 tp->dev->name, state);
2442 /* Restore the CLKREQ setting. */
2443 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2446 pci_read_config_word(tp->pdev,
2447 tp->pcie_cap + PCI_EXP_LNKCTL,
2449 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2450 pci_write_config_word(tp->pdev,
2451 tp->pcie_cap + PCI_EXP_LNKCTL,
2455 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2456 tw32(TG3PCI_MISC_HOST_CTRL,
2457 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2459 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2460 device_may_wakeup(&tp->pdev->dev) &&
2461 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2463 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2464 do_low_power = false;
2465 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2466 !tp->link_config.phy_is_low_power) {
2467 struct phy_device *phydev;
2468 u32 phyid, advertising;
2470 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2472 tp->link_config.phy_is_low_power = 1;
2474 tp->link_config.orig_speed = phydev->speed;
2475 tp->link_config.orig_duplex = phydev->duplex;
2476 tp->link_config.orig_autoneg = phydev->autoneg;
2477 tp->link_config.orig_advertising = phydev->advertising;
2479 advertising = ADVERTISED_TP |
2481 ADVERTISED_Autoneg |
2482 ADVERTISED_10baseT_Half;
2484 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2485 device_should_wake) {
2486 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2488 ADVERTISED_100baseT_Half |
2489 ADVERTISED_100baseT_Full |
2490 ADVERTISED_10baseT_Full;
2492 advertising |= ADVERTISED_10baseT_Full;
2495 phydev->advertising = advertising;
2497 phy_start_aneg(phydev);
2499 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2500 if (phyid != TG3_PHY_ID_BCMAC131) {
2501 phyid &= TG3_PHY_OUI_MASK;
2502 if (phyid == TG3_PHY_OUI_1 ||
2503 phyid == TG3_PHY_OUI_2 ||
2504 phyid == TG3_PHY_OUI_3)
2505 do_low_power = true;
2509 do_low_power = true;
2511 if (tp->link_config.phy_is_low_power == 0) {
2512 tp->link_config.phy_is_low_power = 1;
2513 tp->link_config.orig_speed = tp->link_config.speed;
2514 tp->link_config.orig_duplex = tp->link_config.duplex;
2515 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2518 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2519 tp->link_config.speed = SPEED_10;
2520 tp->link_config.duplex = DUPLEX_HALF;
2521 tp->link_config.autoneg = AUTONEG_ENABLE;
2522 tg3_setup_phy(tp, 0);
2526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2529 val = tr32(GRC_VCPU_EXT_CTRL);
2530 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2531 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2535 for (i = 0; i < 200; i++) {
2536 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2537 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2542 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2543 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2544 WOL_DRV_STATE_SHUTDOWN |
2548 if (device_should_wake) {
2551 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2553 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2557 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2558 mac_mode = MAC_MODE_PORT_MODE_GMII;
2560 mac_mode = MAC_MODE_PORT_MODE_MII;
2562 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2563 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2565 u32 speed = (tp->tg3_flags &
2566 TG3_FLAG_WOL_SPEED_100MB) ?
2567 SPEED_100 : SPEED_10;
2568 if (tg3_5700_link_polarity(tp, speed))
2569 mac_mode |= MAC_MODE_LINK_POLARITY;
2571 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2574 mac_mode = MAC_MODE_PORT_MODE_TBI;
2577 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2578 tw32(MAC_LED_CTRL, tp->led_ctrl);
2580 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2581 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2582 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2583 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2584 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2585 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2587 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2588 mac_mode |= tp->mac_mode &
2589 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2590 if (mac_mode & MAC_MODE_APE_TX_EN)
2591 mac_mode |= MAC_MODE_TDE_ENABLE;
2594 tw32_f(MAC_MODE, mac_mode);
2597 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2601 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2602 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2603 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2606 base_val = tp->pci_clock_ctrl;
2607 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2608 CLOCK_CTRL_TXCLK_DISABLE);
2610 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2611 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2612 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2613 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2614 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2616 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2617 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2618 u32 newbits1, newbits2;
2620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2621 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2622 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2623 CLOCK_CTRL_TXCLK_DISABLE |
2625 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2626 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2627 newbits1 = CLOCK_CTRL_625_CORE;
2628 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2630 newbits1 = CLOCK_CTRL_ALTCLK;
2631 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2634 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2637 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2640 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2644 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2645 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2646 CLOCK_CTRL_TXCLK_DISABLE |
2647 CLOCK_CTRL_44MHZ_CORE);
2649 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2652 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2653 tp->pci_clock_ctrl | newbits3, 40);
2657 if (!(device_should_wake) &&
2658 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2659 tg3_power_down_phy(tp, do_low_power);
2661 tg3_frob_aux_power(tp);
2663 /* Workaround for unstable PLL clock */
2664 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2665 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2666 u32 val = tr32(0x7d00);
2668 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2670 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2673 err = tg3_nvram_lock(tp);
2674 tg3_halt_cpu(tp, RX_CPU_BASE);
2676 tg3_nvram_unlock(tp);
2680 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2682 if (device_should_wake)
2683 pci_enable_wake(tp->pdev, state, true);
2685 /* Finally, set the new power state. */
2686 pci_set_power_state(tp->pdev, state);
2691 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2693 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2694 case MII_TG3_AUX_STAT_10HALF:
2696 *duplex = DUPLEX_HALF;
2699 case MII_TG3_AUX_STAT_10FULL:
2701 *duplex = DUPLEX_FULL;
2704 case MII_TG3_AUX_STAT_100HALF:
2706 *duplex = DUPLEX_HALF;
2709 case MII_TG3_AUX_STAT_100FULL:
2711 *duplex = DUPLEX_FULL;
2714 case MII_TG3_AUX_STAT_1000HALF:
2715 *speed = SPEED_1000;
2716 *duplex = DUPLEX_HALF;
2719 case MII_TG3_AUX_STAT_1000FULL:
2720 *speed = SPEED_1000;
2721 *duplex = DUPLEX_FULL;
2725 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2726 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2728 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2732 *speed = SPEED_INVALID;
2733 *duplex = DUPLEX_INVALID;
2738 static void tg3_phy_copper_begin(struct tg3 *tp)
2743 if (tp->link_config.phy_is_low_power) {
2744 /* Entering low power mode. Disable gigabit and
2745 * 100baseT advertisements.
2747 tg3_writephy(tp, MII_TG3_CTRL, 0);
2749 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2750 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2751 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2752 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2754 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2755 } else if (tp->link_config.speed == SPEED_INVALID) {
2756 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2757 tp->link_config.advertising &=
2758 ~(ADVERTISED_1000baseT_Half |
2759 ADVERTISED_1000baseT_Full);
2761 new_adv = ADVERTISE_CSMA;
2762 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2763 new_adv |= ADVERTISE_10HALF;
2764 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2765 new_adv |= ADVERTISE_10FULL;
2766 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2767 new_adv |= ADVERTISE_100HALF;
2768 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2769 new_adv |= ADVERTISE_100FULL;
2771 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2773 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2775 if (tp->link_config.advertising &
2776 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2778 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2779 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2780 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2781 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2782 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2783 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2784 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2785 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2786 MII_TG3_CTRL_ENABLE_AS_MASTER);
2787 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2789 tg3_writephy(tp, MII_TG3_CTRL, 0);
2792 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2793 new_adv |= ADVERTISE_CSMA;
2795 /* Asking for a specific link mode. */
2796 if (tp->link_config.speed == SPEED_1000) {
2797 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2799 if (tp->link_config.duplex == DUPLEX_FULL)
2800 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2802 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2803 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2804 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2805 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2806 MII_TG3_CTRL_ENABLE_AS_MASTER);
2808 if (tp->link_config.speed == SPEED_100) {
2809 if (tp->link_config.duplex == DUPLEX_FULL)
2810 new_adv |= ADVERTISE_100FULL;
2812 new_adv |= ADVERTISE_100HALF;
2814 if (tp->link_config.duplex == DUPLEX_FULL)
2815 new_adv |= ADVERTISE_10FULL;
2817 new_adv |= ADVERTISE_10HALF;
2819 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2824 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2827 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2828 tp->link_config.speed != SPEED_INVALID) {
2829 u32 bmcr, orig_bmcr;
2831 tp->link_config.active_speed = tp->link_config.speed;
2832 tp->link_config.active_duplex = tp->link_config.duplex;
2835 switch (tp->link_config.speed) {
2841 bmcr |= BMCR_SPEED100;
2845 bmcr |= TG3_BMCR_SPEED1000;
2849 if (tp->link_config.duplex == DUPLEX_FULL)
2850 bmcr |= BMCR_FULLDPLX;
2852 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2853 (bmcr != orig_bmcr)) {
2854 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2855 for (i = 0; i < 1500; i++) {
2859 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2860 tg3_readphy(tp, MII_BMSR, &tmp))
2862 if (!(tmp & BMSR_LSTATUS)) {
2867 tg3_writephy(tp, MII_BMCR, bmcr);
2871 tg3_writephy(tp, MII_BMCR,
2872 BMCR_ANENABLE | BMCR_ANRESTART);
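/* Worked example for the autoneg path of tg3_phy_copper_begin() above
 * (a sketch using the standard <linux/mii.h> bit values).  Advertising
 * all of 10/100 half/full plus symmetric pause yields:
 *
 *	ADVERTISE_CSMA      0x0001
 *	ADVERTISE_10HALF    0x0020
 *	ADVERTISE_10FULL    0x0040
 *	ADVERTISE_100HALF   0x0080
 *	ADVERTISE_100FULL   0x0100
 *	ADVERTISE_PAUSE_CAP 0x0400
 *	                    ------
 *	MII_ADVERTISE       0x05e1
 *
 * The 1000BASE-T half/full bits live in the separate 1000BASE-T control
 * register (MII_TG3_CTRL here), not in MII_ADVERTISE.
 */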
2876 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2880 /* Turn off tap power management. */
2881 /* Set Extended packet length bit */
2882 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2884 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2885 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2887 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2888 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2890 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2891 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2893 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2894 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2896 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2897 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2904 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2906 u32 adv_reg, all_mask = 0;
2908 if (mask & ADVERTISED_10baseT_Half)
2909 all_mask |= ADVERTISE_10HALF;
2910 if (mask & ADVERTISED_10baseT_Full)
2911 all_mask |= ADVERTISE_10FULL;
2912 if (mask & ADVERTISED_100baseT_Half)
2913 all_mask |= ADVERTISE_100HALF;
2914 if (mask & ADVERTISED_100baseT_Full)
2915 all_mask |= ADVERTISE_100FULL;
2917 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2920 if ((adv_reg & all_mask) != all_mask)
2922 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2926 if (mask & ADVERTISED_1000baseT_Half)
2927 all_mask |= ADVERTISE_1000HALF;
2928 if (mask & ADVERTISED_1000baseT_Full)
2929 all_mask |= ADVERTISE_1000FULL;
2931 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2934 if ((tg3_ctrl & all_mask) != all_mask)
2940 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2944 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2947 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2948 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2950 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2951 if (curadv != reqadv)
2954 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2955 tg3_readphy(tp, MII_LPA, rmtadv);
2957 /* Reprogram the advertisement register, even if it
2958 * does not affect the current link. If the link
2959 * gets renegotiated in the future, we can save an
2960 * additional renegotiation cycle by advertising
2961 * it correctly in the first place.
2963 if (curadv != reqadv) {
2964 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2965 ADVERTISE_PAUSE_ASYM);
2966 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
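/* Background sketch for the pause handling above and in
 * tg3_setup_flow_control() (defined elsewhere in this file): with
 * autonegotiated pause, the usual 802.3 resolution of the local and
 * link-partner advertisement bits is, in outline:
 *
 *	local Sym       + remote Sym            -> TX and RX pause
 *	local Sym+Asym  + remote Asym (no Sym)  -> RX pause only
 *	local Asym (no Sym) + remote Sym+Asym   -> TX pause only
 *	anything else                           -> no pause
 *
 * This is only a summary of the standard resolution table; the
 * authoritative behaviour is the driver's own flow-control code.
 */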
2973 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2975 int current_link_up;
2977 u32 lcl_adv, rmt_adv;
2985 (MAC_STATUS_SYNC_CHANGED |
2986 MAC_STATUS_CFG_CHANGED |
2987 MAC_STATUS_MI_COMPLETION |
2988 MAC_STATUS_LNKSTATE_CHANGED));
2991 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2993 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2997 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2999 /* Some third-party PHYs need to be reset on link going down. */
3002 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3003 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3004 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3005 netif_carrier_ok(tp->dev)) {
3006 tg3_readphy(tp, MII_BMSR, &bmsr);
3007 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3008 !(bmsr & BMSR_LSTATUS))
3014 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
3015 tg3_readphy(tp, MII_BMSR, &bmsr);
3016 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3017 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3020 if (!(bmsr & BMSR_LSTATUS)) {
3021 err = tg3_init_5401phy_dsp(tp);
3025 tg3_readphy(tp, MII_BMSR, &bmsr);
3026 for (i = 0; i < 1000; i++) {
3028 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3029 (bmsr & BMSR_LSTATUS)) {
3035 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3036 !(bmsr & BMSR_LSTATUS) &&
3037 tp->link_config.active_speed == SPEED_1000) {
3038 err = tg3_phy_reset(tp);
3040 err = tg3_init_5401phy_dsp(tp);
3045 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3046 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3047 /* 5701 {A0,B0} CRC bug workaround */
3048 tg3_writephy(tp, 0x15, 0x0a75);
3049 tg3_writephy(tp, 0x1c, 0x8c68);
3050 tg3_writephy(tp, 0x1c, 0x8d68);
3051 tg3_writephy(tp, 0x1c, 0x8c68);
3054 /* Clear pending interrupts... */
3055 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3056 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3058 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3059 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3060 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3061 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3065 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3066 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3067 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3069 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3072 current_link_up = 0;
3073 current_speed = SPEED_INVALID;
3074 current_duplex = DUPLEX_INVALID;
3076 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3079 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3080 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3081 if (!(val & (1 << 10))) {
3083 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3089 for (i = 0; i < 100; i++) {
3090 tg3_readphy(tp, MII_BMSR, &bmsr);
3091 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3092 (bmsr & BMSR_LSTATUS))
3097 if (bmsr & BMSR_LSTATUS) {
3100 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3101 for (i = 0; i < 2000; i++) {
3103 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3108 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3113 for (i = 0; i < 200; i++) {
3114 tg3_readphy(tp, MII_BMCR, &bmcr);
3115 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3117 if (bmcr && bmcr != 0x7fff)
3125 tp->link_config.active_speed = current_speed;
3126 tp->link_config.active_duplex = current_duplex;
3128 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3129 if ((bmcr & BMCR_ANENABLE) &&
3130 tg3_copper_is_advertising_all(tp,
3131 tp->link_config.advertising)) {
3132 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3134 current_link_up = 1;
3137 if (!(bmcr & BMCR_ANENABLE) &&
3138 tp->link_config.speed == current_speed &&
3139 tp->link_config.duplex == current_duplex &&
3140 tp->link_config.flowctrl ==
3141 tp->link_config.active_flowctrl) {
3142 current_link_up = 1;
3146 if (current_link_up == 1 &&
3147 tp->link_config.active_duplex == DUPLEX_FULL)
3148 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3152 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3155 tg3_phy_copper_begin(tp);
3157 tg3_readphy(tp, MII_BMSR, &tmp);
3158 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3159 (tmp & BMSR_LSTATUS))
3160 current_link_up = 1;
3163 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3164 if (current_link_up == 1) {
3165 if (tp->link_config.active_speed == SPEED_100 ||
3166 tp->link_config.active_speed == SPEED_10)
3167 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3169 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3170 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3171 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3173 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3175 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3176 if (tp->link_config.active_duplex == DUPLEX_HALF)
3177 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3179 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3180 if (current_link_up == 1 &&
3181 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3182 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3184 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3187 /* ??? Without this setting Netgear GA302T PHY does not
3188 * ??? send/receive packets...
3190 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3191 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3192 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3193 tw32_f(MAC_MI_MODE, tp->mi_mode);
3197 tw32_f(MAC_MODE, tp->mac_mode);
3200 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3201 /* Polled via timer. */
3202 tw32_f(MAC_EVENT, 0);
3204 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3209 current_link_up == 1 &&
3210 tp->link_config.active_speed == SPEED_1000 &&
3211 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3212 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3215 (MAC_STATUS_SYNC_CHANGED |
3216 MAC_STATUS_CFG_CHANGED));
3219 NIC_SRAM_FIRMWARE_MBOX,
3220 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3223 /* Prevent send BD corruption. */
3224 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3225 u16 oldlnkctl, newlnkctl;
3227 pci_read_config_word(tp->pdev,
3228 tp->pcie_cap + PCI_EXP_LNKCTL,
3230 if (tp->link_config.active_speed == SPEED_100 ||
3231 tp->link_config.active_speed == SPEED_10)
3232 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3234 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3235 if (newlnkctl != oldlnkctl)
3236 pci_write_config_word(tp->pdev,
3237 tp->pcie_cap + PCI_EXP_LNKCTL,
3239 } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
3240 u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
3241 if (tp->link_config.active_speed == SPEED_100 ||
3242 tp->link_config.active_speed == SPEED_10)
3243 newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3245 newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3246 if (newreg != oldreg)
3247 tw32(TG3_PCIE_LNKCTL, newreg);
3250 if (current_link_up != netif_carrier_ok(tp->dev)) {
3251 if (current_link_up)
3252 netif_carrier_on(tp->dev);
3254 netif_carrier_off(tp->dev);
3255 tg3_link_report(tp);
3261 struct tg3_fiber_aneginfo {
3263 #define ANEG_STATE_UNKNOWN 0
3264 #define ANEG_STATE_AN_ENABLE 1
3265 #define ANEG_STATE_RESTART_INIT 2
3266 #define ANEG_STATE_RESTART 3
3267 #define ANEG_STATE_DISABLE_LINK_OK 4
3268 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3269 #define ANEG_STATE_ABILITY_DETECT 6
3270 #define ANEG_STATE_ACK_DETECT_INIT 7
3271 #define ANEG_STATE_ACK_DETECT 8
3272 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3273 #define ANEG_STATE_COMPLETE_ACK 10
3274 #define ANEG_STATE_IDLE_DETECT_INIT 11
3275 #define ANEG_STATE_IDLE_DETECT 12
3276 #define ANEG_STATE_LINK_OK 13
3277 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3278 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3281 #define MR_AN_ENABLE 0x00000001
3282 #define MR_RESTART_AN 0x00000002
3283 #define MR_AN_COMPLETE 0x00000004
3284 #define MR_PAGE_RX 0x00000008
3285 #define MR_NP_LOADED 0x00000010
3286 #define MR_TOGGLE_TX 0x00000020
3287 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3288 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3289 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3290 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3291 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3292 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3293 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3294 #define MR_TOGGLE_RX 0x00002000
3295 #define MR_NP_RX 0x00004000
3297 #define MR_LINK_OK 0x80000000
3299 unsigned long link_time, cur_time;
3301 u32 ability_match_cfg;
3302 int ability_match_count;
3304 char ability_match, idle_match, ack_match;
3306 u32 txconfig, rxconfig;
3307 #define ANEG_CFG_NP 0x00000080
3308 #define ANEG_CFG_ACK 0x00000040
3309 #define ANEG_CFG_RF2 0x00000020
3310 #define ANEG_CFG_RF1 0x00000010
3311 #define ANEG_CFG_PS2 0x00000001
3312 #define ANEG_CFG_PS1 0x00008000
3313 #define ANEG_CFG_HD 0x00004000
3314 #define ANEG_CFG_FD 0x00002000
3315 #define ANEG_CFG_INVAL 0x00001f06
3320 #define ANEG_TIMER_ENAB 2
3321 #define ANEG_FAILED -1
3323 #define ANEG_STATE_SETTLE_TIME 10000
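/* Rough road map of the software autoneg state machine below (an
 * informal sketch, not a normative clause 37 description): on a
 * successful negotiation the state typically advances
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART
 *	  -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *	  -> ACK_DETECT_INIT -> ACK_DETECT
 *	  -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *	  -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * with a fall back to AN_ENABLE whenever the partner's ability match is
 * lost, and ANEG_TIMER_ENAB returned from the states that need the
 * ANEG_STATE_SETTLE_TIME delay before re-evaluating.
 */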
3325 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3326 struct tg3_fiber_aneginfo *ap)
3329 unsigned long delta;
3333 if (ap->state == ANEG_STATE_UNKNOWN) {
3337 ap->ability_match_cfg = 0;
3338 ap->ability_match_count = 0;
3339 ap->ability_match = 0;
3345 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3346 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3348 if (rx_cfg_reg != ap->ability_match_cfg) {
3349 ap->ability_match_cfg = rx_cfg_reg;
3350 ap->ability_match = 0;
3351 ap->ability_match_count = 0;
3353 if (++ap->ability_match_count > 1) {
3354 ap->ability_match = 1;
3355 ap->ability_match_cfg = rx_cfg_reg;
3358 if (rx_cfg_reg & ANEG_CFG_ACK)
3366 ap->ability_match_cfg = 0;
3367 ap->ability_match_count = 0;
3368 ap->ability_match = 0;
3374 ap->rxconfig = rx_cfg_reg;
3378 case ANEG_STATE_UNKNOWN:
3379 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3380 ap->state = ANEG_STATE_AN_ENABLE;
3383 case ANEG_STATE_AN_ENABLE:
3384 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3385 if (ap->flags & MR_AN_ENABLE) {
3388 ap->ability_match_cfg = 0;
3389 ap->ability_match_count = 0;
3390 ap->ability_match = 0;
3394 ap->state = ANEG_STATE_RESTART_INIT;
3396 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3400 case ANEG_STATE_RESTART_INIT:
3401 ap->link_time = ap->cur_time;
3402 ap->flags &= ~(MR_NP_LOADED);
3404 tw32(MAC_TX_AUTO_NEG, 0);
3405 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3406 tw32_f(MAC_MODE, tp->mac_mode);
3409 ret = ANEG_TIMER_ENAB;
3410 ap->state = ANEG_STATE_RESTART;
3413 case ANEG_STATE_RESTART:
3414 delta = ap->cur_time - ap->link_time;
3415 if (delta > ANEG_STATE_SETTLE_TIME) {
3416 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3418 ret = ANEG_TIMER_ENAB;
3422 case ANEG_STATE_DISABLE_LINK_OK:
3426 case ANEG_STATE_ABILITY_DETECT_INIT:
3427 ap->flags &= ~(MR_TOGGLE_TX);
3428 ap->txconfig = ANEG_CFG_FD;
3429 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3430 if (flowctrl & ADVERTISE_1000XPAUSE)
3431 ap->txconfig |= ANEG_CFG_PS1;
3432 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3433 ap->txconfig |= ANEG_CFG_PS2;
3434 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3435 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3436 tw32_f(MAC_MODE, tp->mac_mode);
3439 ap->state = ANEG_STATE_ABILITY_DETECT;
3442 case ANEG_STATE_ABILITY_DETECT:
3443 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3444 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3448 case ANEG_STATE_ACK_DETECT_INIT:
3449 ap->txconfig |= ANEG_CFG_ACK;
3450 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3451 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3452 tw32_f(MAC_MODE, tp->mac_mode);
3455 ap->state = ANEG_STATE_ACK_DETECT;
3458 case ANEG_STATE_ACK_DETECT:
3459 if (ap->ack_match != 0) {
3460 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3461 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3462 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3464 ap->state = ANEG_STATE_AN_ENABLE;
3466 } else if (ap->ability_match != 0 &&
3467 ap->rxconfig == 0) {
3468 ap->state = ANEG_STATE_AN_ENABLE;
3472 case ANEG_STATE_COMPLETE_ACK_INIT:
3473 if (ap->rxconfig & ANEG_CFG_INVAL) {
3477 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3478 MR_LP_ADV_HALF_DUPLEX |
3479 MR_LP_ADV_SYM_PAUSE |
3480 MR_LP_ADV_ASYM_PAUSE |
3481 MR_LP_ADV_REMOTE_FAULT1 |
3482 MR_LP_ADV_REMOTE_FAULT2 |
3483 MR_LP_ADV_NEXT_PAGE |
3486 if (ap->rxconfig & ANEG_CFG_FD)
3487 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3488 if (ap->rxconfig & ANEG_CFG_HD)
3489 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3490 if (ap->rxconfig & ANEG_CFG_PS1)
3491 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3492 if (ap->rxconfig & ANEG_CFG_PS2)
3493 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3494 if (ap->rxconfig & ANEG_CFG_RF1)
3495 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3496 if (ap->rxconfig & ANEG_CFG_RF2)
3497 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3498 if (ap->rxconfig & ANEG_CFG_NP)
3499 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3501 ap->link_time = ap->cur_time;
3503 ap->flags ^= (MR_TOGGLE_TX);
3504 if (ap->rxconfig & 0x0008)
3505 ap->flags |= MR_TOGGLE_RX;
3506 if (ap->rxconfig & ANEG_CFG_NP)
3507 ap->flags |= MR_NP_RX;
3508 ap->flags |= MR_PAGE_RX;
3510 ap->state = ANEG_STATE_COMPLETE_ACK;
3511 ret = ANEG_TIMER_ENAB;
3514 case ANEG_STATE_COMPLETE_ACK:
3515 if (ap->ability_match != 0 &&
3516 ap->rxconfig == 0) {
3517 ap->state = ANEG_STATE_AN_ENABLE;
3520 delta = ap->cur_time - ap->link_time;
3521 if (delta > ANEG_STATE_SETTLE_TIME) {
3522 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3523 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3525 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3526 !(ap->flags & MR_NP_RX)) {
3527 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3535 case ANEG_STATE_IDLE_DETECT_INIT:
3536 ap->link_time = ap->cur_time;
3537 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3538 tw32_f(MAC_MODE, tp->mac_mode);
3541 ap->state = ANEG_STATE_IDLE_DETECT;
3542 ret = ANEG_TIMER_ENAB;
3545 case ANEG_STATE_IDLE_DETECT:
3546 if (ap->ability_match != 0 &&
3547 ap->rxconfig == 0) {
3548 ap->state = ANEG_STATE_AN_ENABLE;
3551 delta = ap->cur_time - ap->link_time;
3552 if (delta > ANEG_STATE_SETTLE_TIME) {
3553 /* XXX another gem from the Broadcom driver :( */
3554 ap->state = ANEG_STATE_LINK_OK;
3558 case ANEG_STATE_LINK_OK:
3559 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3563 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3564 /* ??? unimplemented */
3567 case ANEG_STATE_NEXT_PAGE_WAIT:
3568 /* ??? unimplemented */
3579 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3582 struct tg3_fiber_aneginfo aninfo;
3583 int status = ANEG_FAILED;
3587 tw32_f(MAC_TX_AUTO_NEG, 0);
3589 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3590 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3593 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3596 memset(&aninfo, 0, sizeof(aninfo));
3597 aninfo.flags |= MR_AN_ENABLE;
3598 aninfo.state = ANEG_STATE_UNKNOWN;
3599 aninfo.cur_time = 0;
3601 while (++tick < 195000) {
3602 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3603 if (status == ANEG_DONE || status == ANEG_FAILED)
3609 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3610 tw32_f(MAC_MODE, tp->mac_mode);
3613 *txflags = aninfo.txconfig;
3614 *rxflags = aninfo.flags;
3616 if (status == ANEG_DONE &&
3617 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3618 MR_LP_ADV_FULL_DUPLEX)))
3624 static void tg3_init_bcm8002(struct tg3 *tp)
3626 u32 mac_status = tr32(MAC_STATUS);
3629 /* Reset when initializing for the first time, or when we have a link. */
3630 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3631 !(mac_status & MAC_STATUS_PCS_SYNCED))
3634 /* Set PLL lock range. */
3635 tg3_writephy(tp, 0x16, 0x8007);
3638 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3640 /* Wait for reset to complete. */
3641 /* XXX schedule_timeout() ... */
3642 for (i = 0; i < 500; i++)
3645 /* Config mode; select PMA/Ch 1 regs. */
3646 tg3_writephy(tp, 0x10, 0x8411);
3648 /* Enable auto-lock and comdet, select txclk for tx. */
3649 tg3_writephy(tp, 0x11, 0x0a10);
3651 tg3_writephy(tp, 0x18, 0x00a0);
3652 tg3_writephy(tp, 0x16, 0x41ff);
3654 /* Assert and deassert POR. */
3655 tg3_writephy(tp, 0x13, 0x0400);
3657 tg3_writephy(tp, 0x13, 0x0000);
3659 tg3_writephy(tp, 0x11, 0x0a50);
3661 tg3_writephy(tp, 0x11, 0x0a10);
3663 /* Wait for signal to stabilize */
3664 /* XXX schedule_timeout() ... */
3665 for (i = 0; i < 15000; i++)
3668 /* Deselect the channel register so we can read the PHYID later. */
3671 tg3_writephy(tp, 0x10, 0x8011);
3674 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3677 u32 sg_dig_ctrl, sg_dig_status;
3678 u32 serdes_cfg, expected_sg_dig_ctrl;
3679 int workaround, port_a;
3680 int current_link_up;
3683 expected_sg_dig_ctrl = 0;
3686 current_link_up = 0;
3688 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3689 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3691 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3694 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3695 /* preserve bits 20-23 for voltage regulator */
3696 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3699 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3701 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3702 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3704 u32 val = serdes_cfg;
3710 tw32_f(MAC_SERDES_CFG, val);
3713 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3715 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3716 tg3_setup_flow_control(tp, 0, 0);
3717 current_link_up = 1;
3722 /* Want auto-negotiation. */
3723 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3725 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3726 if (flowctrl & ADVERTISE_1000XPAUSE)
3727 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3728 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3729 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3731 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3732 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3733 tp->serdes_counter &&
3734 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3735 MAC_STATUS_RCVD_CFG)) ==
3736 MAC_STATUS_PCS_SYNCED)) {
3737 tp->serdes_counter--;
3738 current_link_up = 1;
3743 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3744 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3746 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3748 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3749 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3750 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3751 MAC_STATUS_SIGNAL_DET)) {
3752 sg_dig_status = tr32(SG_DIG_STATUS);
3753 mac_status = tr32(MAC_STATUS);
3755 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3756 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3757 u32 local_adv = 0, remote_adv = 0;
3759 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3760 local_adv |= ADVERTISE_1000XPAUSE;
3761 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3762 local_adv |= ADVERTISE_1000XPSE_ASYM;
3764 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3765 remote_adv |= LPA_1000XPAUSE;
3766 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3767 remote_adv |= LPA_1000XPAUSE_ASYM;
3769 tg3_setup_flow_control(tp, local_adv, remote_adv);
3770 current_link_up = 1;
3771 tp->serdes_counter = 0;
3772 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3773 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3774 if (tp->serdes_counter)
3775 tp->serdes_counter--;
3778 u32 val = serdes_cfg;
3785 tw32_f(MAC_SERDES_CFG, val);
3788 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3791 /* Link parallel detection - link is up */
3792 /* only if we have PCS_SYNC and not */
3793 /* receiving config code words */
3794 mac_status = tr32(MAC_STATUS);
3795 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3796 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3797 tg3_setup_flow_control(tp, 0, 0);
3798 current_link_up = 1;
3800 TG3_FLG2_PARALLEL_DETECT;
3801 tp->serdes_counter =
3802 SERDES_PARALLEL_DET_TIMEOUT;
3804 goto restart_autoneg;
3808 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3809 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3813 return current_link_up;
3816 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3818 int current_link_up = 0;
3820 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3823 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3824 u32 txflags, rxflags;
3827 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3828 u32 local_adv = 0, remote_adv = 0;
3830 if (txflags & ANEG_CFG_PS1)
3831 local_adv |= ADVERTISE_1000XPAUSE;
3832 if (txflags & ANEG_CFG_PS2)
3833 local_adv |= ADVERTISE_1000XPSE_ASYM;
3835 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3836 remote_adv |= LPA_1000XPAUSE;
3837 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3838 remote_adv |= LPA_1000XPAUSE_ASYM;
3840 tg3_setup_flow_control(tp, local_adv, remote_adv);
3842 current_link_up = 1;
3844 for (i = 0; i < 30; i++) {
3847 (MAC_STATUS_SYNC_CHANGED |
3848 MAC_STATUS_CFG_CHANGED));
3850 if ((tr32(MAC_STATUS) &
3851 (MAC_STATUS_SYNC_CHANGED |
3852 MAC_STATUS_CFG_CHANGED)) == 0)
3856 mac_status = tr32(MAC_STATUS);
3857 if (current_link_up == 0 &&
3858 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3859 !(mac_status & MAC_STATUS_RCVD_CFG))
3860 current_link_up = 1;
3862 tg3_setup_flow_control(tp, 0, 0);
3864 /* Forcing 1000FD link up. */
3865 current_link_up = 1;
3867 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3870 tw32_f(MAC_MODE, tp->mac_mode);
3875 return current_link_up;
3878 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3881 u16 orig_active_speed;
3882 u8 orig_active_duplex;
3884 int current_link_up;
3887 orig_pause_cfg = tp->link_config.active_flowctrl;
3888 orig_active_speed = tp->link_config.active_speed;
3889 orig_active_duplex = tp->link_config.active_duplex;
3891 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3892 netif_carrier_ok(tp->dev) &&
3893 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3894 mac_status = tr32(MAC_STATUS);
3895 mac_status &= (MAC_STATUS_PCS_SYNCED |
3896 MAC_STATUS_SIGNAL_DET |
3897 MAC_STATUS_CFG_CHANGED |
3898 MAC_STATUS_RCVD_CFG);
3899 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3900 MAC_STATUS_SIGNAL_DET)) {
3901 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3902 MAC_STATUS_CFG_CHANGED));
3907 tw32_f(MAC_TX_AUTO_NEG, 0);
3909 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3910 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3911 tw32_f(MAC_MODE, tp->mac_mode);
3914 if (tp->phy_id == PHY_ID_BCM8002)
3915 tg3_init_bcm8002(tp);
3917 /* Enable link change event even when serdes polling. */
3918 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3921 current_link_up = 0;
3922 mac_status = tr32(MAC_STATUS);
3924 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3925 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3927 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3929 tp->napi[0].hw_status->status =
3930 (SD_STATUS_UPDATED |
3931 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3933 for (i = 0; i < 100; i++) {
3934 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3935 MAC_STATUS_CFG_CHANGED));
3937 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3938 MAC_STATUS_CFG_CHANGED |
3939 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3943 mac_status = tr32(MAC_STATUS);
3944 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3945 current_link_up = 0;
3946 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3947 tp->serdes_counter == 0) {
3948 tw32_f(MAC_MODE, (tp->mac_mode |
3949 MAC_MODE_SEND_CONFIGS));
3951 tw32_f(MAC_MODE, tp->mac_mode);
3955 if (current_link_up == 1) {
3956 tp->link_config.active_speed = SPEED_1000;
3957 tp->link_config.active_duplex = DUPLEX_FULL;
3958 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3959 LED_CTRL_LNKLED_OVERRIDE |
3960 LED_CTRL_1000MBPS_ON));
3962 tp->link_config.active_speed = SPEED_INVALID;
3963 tp->link_config.active_duplex = DUPLEX_INVALID;
3964 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3965 LED_CTRL_LNKLED_OVERRIDE |
3966 LED_CTRL_TRAFFIC_OVERRIDE));
3969 if (current_link_up != netif_carrier_ok(tp->dev)) {
3970 if (current_link_up)
3971 netif_carrier_on(tp->dev);
3973 netif_carrier_off(tp->dev);
3974 tg3_link_report(tp);
3976 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3977 if (orig_pause_cfg != now_pause_cfg ||
3978 orig_active_speed != tp->link_config.active_speed ||
3979 orig_active_duplex != tp->link_config.active_duplex)
3980 tg3_link_report(tp);
3986 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3988 int current_link_up, err = 0;
3992 u32 local_adv, remote_adv;
3994 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3995 tw32_f(MAC_MODE, tp->mac_mode);
4001 (MAC_STATUS_SYNC_CHANGED |
4002 MAC_STATUS_CFG_CHANGED |
4003 MAC_STATUS_MI_COMPLETION |
4004 MAC_STATUS_LNKSTATE_CHANGED));
4010 current_link_up = 0;
4011 current_speed = SPEED_INVALID;
4012 current_duplex = DUPLEX_INVALID;
4014 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4015 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4016 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4017 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4018 bmsr |= BMSR_LSTATUS;
4020 bmsr &= ~BMSR_LSTATUS;
4023 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4025 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4026 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4027 /* do nothing, just check for link up at the end */
4028 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4031 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4032 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4033 ADVERTISE_1000XPAUSE |
4034 ADVERTISE_1000XPSE_ASYM |
4037 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4039 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4040 new_adv |= ADVERTISE_1000XHALF;
4041 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4042 new_adv |= ADVERTISE_1000XFULL;
4044 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4045 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4046 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4047 tg3_writephy(tp, MII_BMCR, bmcr);
4049 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4050 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4051 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4058 bmcr &= ~BMCR_SPEED1000;
4059 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4061 if (tp->link_config.duplex == DUPLEX_FULL)
4062 new_bmcr |= BMCR_FULLDPLX;
4064 if (new_bmcr != bmcr) {
4065 /* BMCR_SPEED1000 is a reserved bit that needs
4066 * to be set on write.
4068 new_bmcr |= BMCR_SPEED1000;
4070 /* Force a linkdown */
4071 if (netif_carrier_ok(tp->dev)) {
4074 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4075 adv &= ~(ADVERTISE_1000XFULL |
4076 ADVERTISE_1000XHALF |
4078 tg3_writephy(tp, MII_ADVERTISE, adv);
4079 tg3_writephy(tp, MII_BMCR, bmcr |
4083 netif_carrier_off(tp->dev);
4085 tg3_writephy(tp, MII_BMCR, new_bmcr);
4087 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4088 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4089 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4091 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4092 bmsr |= BMSR_LSTATUS;
4094 bmsr &= ~BMSR_LSTATUS;
4096 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4100 if (bmsr & BMSR_LSTATUS) {
4101 current_speed = SPEED_1000;
4102 current_link_up = 1;
4103 if (bmcr & BMCR_FULLDPLX)
4104 current_duplex = DUPLEX_FULL;
4106 current_duplex = DUPLEX_HALF;
4111 if (bmcr & BMCR_ANENABLE) {
4114 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4115 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4116 common = local_adv & remote_adv;
4117 if (common & (ADVERTISE_1000XHALF |
4118 ADVERTISE_1000XFULL)) {
4119 if (common & ADVERTISE_1000XFULL)
4120 current_duplex = DUPLEX_FULL;
4122 current_duplex = DUPLEX_HALF;
4125 current_link_up = 0;
4129 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4130 tg3_setup_flow_control(tp, local_adv, remote_adv);
4132 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4133 if (tp->link_config.active_duplex == DUPLEX_HALF)
4134 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4136 tw32_f(MAC_MODE, tp->mac_mode);
4139 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4141 tp->link_config.active_speed = current_speed;
4142 tp->link_config.active_duplex = current_duplex;
4144 if (current_link_up != netif_carrier_ok(tp->dev)) {
4145 if (current_link_up)
4146 netif_carrier_on(tp->dev);
4148 netif_carrier_off(tp->dev);
4149 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4151 tg3_link_report(tp);
4156 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4158 if (tp->serdes_counter) {
4159 /* Give autoneg time to complete. */
4160 tp->serdes_counter--;
4163 if (!netif_carrier_ok(tp->dev) &&
4164 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4167 tg3_readphy(tp, MII_BMCR, &bmcr);
4168 if (bmcr & BMCR_ANENABLE) {
4171 /* Select shadow register 0x1f */
4172 tg3_writephy(tp, 0x1c, 0x7c00);
4173 tg3_readphy(tp, 0x1c, &phy1);
4175 /* Select expansion interrupt status register */
4176 tg3_writephy(tp, 0x17, 0x0f01);
4177 tg3_readphy(tp, 0x15, &phy2);
4178 tg3_readphy(tp, 0x15, &phy2);
4180 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4181 /* We have signal detect and not receiving
4182 * config code words, link is up by parallel
4186 bmcr &= ~BMCR_ANENABLE;
4187 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4188 tg3_writephy(tp, MII_BMCR, bmcr);
4189 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4193 else if (netif_carrier_ok(tp->dev) &&
4194 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4195 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4198 /* Select expansion interrupt status register */
4199 tg3_writephy(tp, 0x17, 0x0f01);
4200 tg3_readphy(tp, 0x15, &phy2);
4204 /* Config code words received, turn on autoneg. */
4205 tg3_readphy(tp, MII_BMCR, &bmcr);
4206 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4208 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4214 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4218 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4219 err = tg3_setup_fiber_phy(tp, force_reset);
4220 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4221 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4223 err = tg3_setup_copper_phy(tp, force_reset);
4226 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4229 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4230 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4232 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4237 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4238 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4239 tw32(GRC_MISC_CFG, val);
4242 if (tp->link_config.active_speed == SPEED_1000 &&
4243 tp->link_config.active_duplex == DUPLEX_HALF)
4244 tw32(MAC_TX_LENGTHS,
4245 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4246 (6 << TX_LENGTHS_IPG_SHIFT) |
4247 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4249 tw32(MAC_TX_LENGTHS,
4250 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4251 (6 << TX_LENGTHS_IPG_SHIFT) |
4252 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4254 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4255 if (netif_carrier_ok(tp->dev)) {
4256 tw32(HOSTCC_STAT_COAL_TICKS,
4257 tp->coal.stats_block_coalesce_usecs);
4259 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4263 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4264 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4265 if (!netif_carrier_ok(tp->dev))
4266 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4269 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4270 tw32(PCIE_PWR_MGMT_THRESH, val);
4276 /* This is called whenever we suspect that the system chipset is re-
4277 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4278 * is bogus tx completions. We try to recover by setting the
4279 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4282 static void tg3_tx_recover(struct tg3 *tp)
4284 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4285 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4287 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4288 "mapped I/O cycles to the network device, attempting to "
4289 "recover. Please report the problem to the driver maintainer "
4290 "and include system chipset information.\n", tp->dev->name);
4292 spin_lock(&tp->lock);
4293 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4294 spin_unlock(&tp->lock);
4297 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4300 return tnapi->tx_pending -
4301 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
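/* Worked example for tg3_tx_avail() above (illustrative numbers only):
 * with TG3_TX_RING_SIZE = 512, tx_pending = 511, tx_prod = 10 and
 * tx_cons = 500,
 *
 *	in flight = (10 - 500) & 511 = 22
 *	available = 511 - 22         = 489
 *
 * The mask handles producer/consumer wraparound because the indices are
 * free-running and the ring size is a power of two.
 */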
4304 /* Tigon3 never reports partial packet sends. So we do not
4305 * need special logic to handle SKBs that have not had all
4306 * of their frags sent yet, like SunGEM does.
4308 static void tg3_tx(struct tg3_napi *tnapi)
4310 struct tg3 *tp = tnapi->tp;
4311 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4312 u32 sw_idx = tnapi->tx_cons;
4313 struct netdev_queue *txq;
4314 int index = tnapi - tp->napi;
4316 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
4319 txq = netdev_get_tx_queue(tp->dev, index);
4321 while (sw_idx != hw_idx) {
4322 struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4323 struct sk_buff *skb = ri->skb;
4326 if (unlikely(skb == NULL)) {
4331 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4335 sw_idx = NEXT_TX(sw_idx);
4337 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4338 ri = &tnapi->tx_buffers[sw_idx];
4339 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4341 sw_idx = NEXT_TX(sw_idx);
4346 if (unlikely(tx_bug)) {
4352 tnapi->tx_cons = sw_idx;
4354 /* Need to make the tx_cons update visible to tg3_start_xmit()
4355 * before checking for netif_queue_stopped(). Without the
4356 * memory barrier, there is a small possibility that tg3_start_xmit()
4357 * will miss it and cause the queue to be stopped forever.
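/* Sketch of the pairing the comment above relies on (assumed shape, not
 * a verbatim copy of either side).  The consumer side does, in effect:
 *
 *	tnapi->tx_cons = sw_idx;
 *	smp_mb();
 *	if (netif_tx_queue_stopped(txq) && ring has room)
 *		netif_tx_wake_queue(txq);
 *
 * while the producer side (tg3_start_xmit()) stops the queue and then,
 * after its own barrier, re-checks tg3_tx_avail() before giving up.
 * Each side's write must be visible before it reads the other side's
 * state, otherwise both can decide "nothing to do" and the queue stays
 * stopped forever.
 */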
4361 if (unlikely(netif_tx_queue_stopped(txq) &&
4362 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4363 __netif_tx_lock(txq, smp_processor_id());
4364 if (netif_tx_queue_stopped(txq) &&
4365 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4366 netif_tx_wake_queue(txq);
4367 __netif_tx_unlock(txq);
4371 /* Returns size of skb allocated or < 0 on error.
4373 * We only need to fill in the address because the other members
4374 * of the RX descriptor are invariant, see tg3_init_rings.
4376 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4377 * posting buffers we only dirty the first cache line of the RX
4378 * descriptor (containing the address). Whereas for the RX status
4379 * buffers the cpu only reads the last cacheline of the RX descriptor
4380 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4382 static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4383 int src_idx, u32 dest_idx_unmasked)
4385 struct tg3 *tp = tnapi->tp;
4386 struct tg3_rx_buffer_desc *desc;
4387 struct ring_info *map, *src_map;
4388 struct sk_buff *skb;
4390 int skb_size, dest_idx;
4391 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4394 switch (opaque_key) {
4395 case RXD_OPAQUE_RING_STD:
4396 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4397 desc = &tpr->rx_std[dest_idx];
4398 map = &tpr->rx_std_buffers[dest_idx];
4400 src_map = &tpr->rx_std_buffers[src_idx];
4401 skb_size = tp->rx_pkt_map_sz;
4404 case RXD_OPAQUE_RING_JUMBO:
4405 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4406 desc = &tpr->rx_jmb[dest_idx].std;
4407 map = &tpr->rx_jmb_buffers[dest_idx];
4409 src_map = &tpr->rx_jmb_buffers[src_idx];
4410 skb_size = TG3_RX_JMB_MAP_SZ;
4417 /* Do not overwrite any of the map or rp information
4418 * until we are sure we can commit to a new buffer.
4420 * Callers depend upon this behavior and assume that
4421 * we leave everything unchanged if we fail.
4423 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4427 skb_reserve(skb, tp->rx_offset);
4429 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4430 PCI_DMA_FROMDEVICE);
4433 pci_unmap_addr_set(map, mapping, mapping);
4435 if (src_map != NULL)
4436 src_map->skb = NULL;
4438 desc->addr_hi = ((u64)mapping >> 32);
4439 desc->addr_lo = ((u64)mapping & 0xffffffff);
4444 /* We only need to move over in the address because the other
4445 * members of the RX descriptor are invariant. See notes above
4446 * tg3_alloc_rx_skb for full details.
4448 static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
4449 int src_idx, u32 dest_idx_unmasked)
4451 struct tg3 *tp = tnapi->tp;
4452 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4453 struct ring_info *src_map, *dest_map;
4455 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4457 switch (opaque_key) {
4458 case RXD_OPAQUE_RING_STD:
4459 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4460 dest_desc = &tpr->rx_std[dest_idx];
4461 dest_map = &tpr->rx_std_buffers[dest_idx];
4462 src_desc = &tpr->rx_std[src_idx];
4463 src_map = &tpr->rx_std_buffers[src_idx];
4466 case RXD_OPAQUE_RING_JUMBO:
4467 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4468 dest_desc = &tpr->rx_jmb[dest_idx].std;
4469 dest_map = &tpr->rx_jmb_buffers[dest_idx];
4470 src_desc = &tpr->rx_jmb[src_idx].std;
4471 src_map = &tpr->rx_jmb_buffers[src_idx];
4478 dest_map->skb = src_map->skb;
4479 pci_unmap_addr_set(dest_map, mapping,
4480 pci_unmap_addr(src_map, mapping));
4481 dest_desc->addr_hi = src_desc->addr_hi;
4482 dest_desc->addr_lo = src_desc->addr_lo;
4484 src_map->skb = NULL;
4487 /* The RX ring scheme is composed of multiple rings which post fresh
4488 * buffers to the chip, and one special ring the chip uses to report
4489 * status back to the host.
4491 * The special ring reports the status of received packets to the
4492 * host. The chip does not write into the original descriptor the
4493 * RX buffer was obtained from. The chip simply takes the original
4494 * descriptor as provided by the host, updates the status and length
4495 * field, then writes this into the next status ring entry.
4497 * Each ring the host uses to post buffers to the chip is described
4498 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4499 * it is first placed into the on-chip ram. When the packet's length
4500 * is known, it walks down the TG3_BDINFO entries to select the ring.
4501 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4502 * which is within the range of the new packet's length is chosen.
4504 * The "separate ring for rx status" scheme may sound queer, but it makes
4505 * sense from a cache coherency perspective. If only the host writes
4506 * to the buffer post rings, and only the chip writes to the rx status
4507 * rings, then cache lines never move beyond shared-modified state.
4508 * If both the host and chip were to write into the same ring, cache line
4509 * eviction could occur since both entities want it in an exclusive state.
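/* Sketch of how tg3_rx() below ties a status ring entry back to the
 * producer ring it came from: the opaque cookie written by the host
 * when the buffer was posted encodes both the ring and the index, so
 * the receive path recovers them with
 *
 *	desc_idx   = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 *	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 *
 * and then uses opaque_key (RXD_OPAQUE_RING_STD or
 * RXD_OPAQUE_RING_JUMBO) to pick the matching buffer-info array and
 * producer pointer.
 */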
4511 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4513 struct tg3 *tp = tnapi->tp;
4514 u32 work_mask, rx_std_posted = 0;
4515 u32 sw_idx = tnapi->rx_rcb_ptr;
4518 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4520 hw_idx = tnapi->hw_status->idx[0].rx_producer;
4522 * We need to order the read of hw_idx and the read of
4523 * the opaque cookie.
4528 while (sw_idx != hw_idx && budget > 0) {
4529 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4531 struct sk_buff *skb;
4532 dma_addr_t dma_addr;
4533 u32 opaque_key, desc_idx, *post_ptr;
4535 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4536 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4537 if (opaque_key == RXD_OPAQUE_RING_STD) {
4538 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
4539 dma_addr = pci_unmap_addr(ri, mapping);
4541 post_ptr = &tpr->rx_std_ptr;
4543 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4544 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
4545 dma_addr = pci_unmap_addr(ri, mapping);
4547 post_ptr = &tpr->rx_jmb_ptr;
4549 goto next_pkt_nopost;
4551 work_mask |= opaque_key;
4553 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4554 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4556 tg3_recycle_rx(tnapi, opaque_key,
4557 desc_idx, *post_ptr);
4559 /* Other statistics are kept track of by the card. */
4560 tp->net_stats.rx_dropped++;
4564 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4567 if (len > RX_COPY_THRESHOLD
4568 && tp->rx_offset == NET_IP_ALIGN
4569 /* rx_offset will likely not equal NET_IP_ALIGN
4570 * if this is a 5701 card running in PCI-X mode
4571 * [see tg3_get_invariants()]
4576 skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
4577 desc_idx, *post_ptr);
4581 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4582 PCI_DMA_FROMDEVICE);
4586 struct sk_buff *copy_skb;
4588 tg3_recycle_rx(tnapi, opaque_key,
4589 desc_idx, *post_ptr);
4591 copy_skb = netdev_alloc_skb(tp->dev,
4592 len + TG3_RAW_IP_ALIGN);
4593 if (copy_skb == NULL)
4594 goto drop_it_no_recycle;
4596 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4597 skb_put(copy_skb, len);
4598 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4599 skb_copy_from_linear_data(skb, copy_skb->data, len);
4600 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4602 /* We'll reuse the original ring buffer. */
4606 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4607 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4608 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4609 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4610 skb->ip_summed = CHECKSUM_UNNECESSARY;
4612 skb->ip_summed = CHECKSUM_NONE;
4614 skb->protocol = eth_type_trans(skb, tp->dev);
4616 if (len > (tp->dev->mtu + ETH_HLEN) &&
4617 skb->protocol != htons(ETH_P_8021Q)) {
4622 #if TG3_VLAN_TAG_USED
4623 if (tp->vlgrp != NULL &&
4624 desc->type_flags & RXD_FLAG_VLAN) {
4625 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4626 desc->err_vlan & RXD_VLAN_MASK, skb);
4629 napi_gro_receive(&tnapi->napi, skb);
4637 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4638 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4640 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4641 TG3_64BIT_REG_LOW, idx);
4642 work_mask &= ~RXD_OPAQUE_RING_STD;
4647 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4649 /* Refresh hw_idx to see if there is new work */
4650 if (sw_idx == hw_idx) {
4651 hw_idx = tnapi->hw_status->idx[0].rx_producer;
4656 /* ACK the status ring. */
4657 tnapi->rx_rcb_ptr = sw_idx;
4658 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4660 /* Refill RX ring(s). */
4661 if (work_mask & RXD_OPAQUE_RING_STD) {
4662 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
4663 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4666 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4667 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
4668 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4676 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4678 struct tg3 *tp = tnapi->tp;
4679 struct tg3_hw_status *sblk = tnapi->hw_status;
4681 /* handle link change and other phy events */
4682 if (!(tp->tg3_flags &
4683 (TG3_FLAG_USE_LINKCHG_REG |
4684 TG3_FLAG_POLL_SERDES))) {
4685 if (sblk->status & SD_STATUS_LINK_CHG) {
4686 sblk->status = SD_STATUS_UPDATED |
4687 (sblk->status & ~SD_STATUS_LINK_CHG);
4688 spin_lock(&tp->lock);
4689 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4691 (MAC_STATUS_SYNC_CHANGED |
4692 MAC_STATUS_CFG_CHANGED |
4693 MAC_STATUS_MI_COMPLETION |
4694 MAC_STATUS_LNKSTATE_CHANGED));
4697 tg3_setup_phy(tp, 0);
4698 spin_unlock(&tp->lock);
4702 /* run TX completion thread */
4703 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4705 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4709 /* run RX thread, within the bounds set by NAPI.
4710 * All RX "locking" is done by ensuring outside
4711 * code synchronizes with tg3->napi.poll()
4713 if (sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr)
4714 work_done += tg3_rx(tnapi, budget - work_done);
4719 static int tg3_poll(struct napi_struct *napi, int budget)
4721 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4722 struct tg3 *tp = tnapi->tp;
4724 struct tg3_hw_status *sblk = tnapi->hw_status;
4727 work_done = tg3_poll_work(tnapi, work_done, budget);
4729 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4732 if (unlikely(work_done >= budget))
4735 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4736 /* tnapi->last_tag is used in tg3_int_reenable() below
4737 * to tell the hw how much work has been processed,
4738 * so we must read it before checking for more work.
4740 tnapi->last_tag = sblk->status_tag;
4741 tnapi->last_irq_tag = tnapi->last_tag;
4744 sblk->status &= ~SD_STATUS_UPDATED;
4746 if (likely(!tg3_has_work(tnapi))) {
4747 napi_complete(napi);
4748 tg3_int_reenable(tnapi);
4756 /* work_done is guaranteed to be less than budget. */
4757 napi_complete(napi);
4758 schedule_work(&tp->reset_task);
4762 static void tg3_irq_quiesce(struct tg3 *tp)
4766 BUG_ON(tp->irq_sync);
4771 for (i = 0; i < tp->irq_cnt; i++)
4772 synchronize_irq(tp->napi[i].irq_vec);
4775 static inline int tg3_irq_sync(struct tg3 *tp)
4777 return tp->irq_sync;
4780 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4781 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4782 * with as well. Most of the time, this is not necessary except when
4783 * shutting down the device.
4785 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4787 spin_lock_bh(&tp->lock);
4789 tg3_irq_quiesce(tp);
4792 static inline void tg3_full_unlock(struct tg3 *tp)
4794 spin_unlock_bh(&tp->lock);
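/* A minimal usage sketch (see e.g. tg3_change_mtu() later in this file):
 * callers that reconfigure the hardware take the full lock with irq_sync
 * set so the IRQ handlers are quiesced first:
 *
 *	tg3_full_lock(tp, 1);
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	... reprogram the device ...
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 */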
4797 /* One-shot MSI handler - Chip automatically disables interrupt
4798 * after sending MSI so driver doesn't have to do it.
4800 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4802 struct tg3_napi *tnapi = dev_id;
4803 struct tg3 *tp = tnapi->tp;
4805 prefetch(tnapi->hw_status);
4806 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4808 if (likely(!tg3_irq_sync(tp)))
4809 napi_schedule(&tnapi->napi);
4814 /* MSI ISR - No need to check for interrupt sharing and no need to
4815 * flush status block and interrupt mailbox. PCI ordering rules
4816 * guarantee that MSI will arrive after the status block.
4818 static irqreturn_t tg3_msi(int irq, void *dev_id)
4820 struct tg3_napi *tnapi = dev_id;
4821 struct tg3 *tp = tnapi->tp;
4823 prefetch(tnapi->hw_status);
4824 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4826 * Writing any value to intr-mbox-0 clears PCI INTA# and
4827 * chip-internal interrupt pending events.
4828 * Writing non-zero to intr-mbox-0 additionally tells the
4829 * NIC to stop sending us irqs, engaging "in-intr-handler"
4832 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4833 if (likely(!tg3_irq_sync(tp)))
4834 napi_schedule(&tnapi->napi);
4836 return IRQ_RETVAL(1);
4839 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4841 struct tg3_napi *tnapi = dev_id;
4842 struct tg3 *tp = tnapi->tp;
4843 struct tg3_hw_status *sblk = tnapi->hw_status;
4844 unsigned int handled = 1;
4846 /* In INTx mode, it is possible for the interrupt to arrive at
4847 * the CPU before the status block posted prior to the interrupt is visible.
4848 * Reading the PCI State register will confirm whether the
4849 * interrupt is ours and will flush the status block.
4851 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4852 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4853 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4860 * Writing any value to intr-mbox-0 clears PCI INTA# and
4861 * chip-internal interrupt pending events.
4862 * Writing non-zero to intr-mbox-0 additionally tells the
4863 * NIC to stop sending us irqs, engaging "in-intr-handler"
4866 * Flush the mailbox to de-assert the IRQ immediately to prevent
4867 * spurious interrupts. The flush impacts performance but
4868 * excessive spurious interrupts can be worse in some cases.
4870 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4871 if (tg3_irq_sync(tp))
4873 sblk->status &= ~SD_STATUS_UPDATED;
4874 if (likely(tg3_has_work(tnapi))) {
4875 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4876 napi_schedule(&tnapi->napi);
4878 /* No work, shared interrupt perhaps? re-enable
4879 * interrupts, and flush that PCI write
4881 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4885 return IRQ_RETVAL(handled);
4888 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4890 struct tg3_napi *tnapi = dev_id;
4891 struct tg3 *tp = tnapi->tp;
4892 struct tg3_hw_status *sblk = tnapi->hw_status;
4893 unsigned int handled = 1;
4895 /* In INTx mode, it is possible for the interrupt to arrive at
4896 * the CPU before the status block posted prior to the interrupt is visible.
4897 * Reading the PCI State register will confirm whether the
4898 * interrupt is ours and will flush the status block.
4900 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
4901 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4902 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4909 * Writing any value to intr-mbox-0 clears PCI INTA# and
4910 * chip-internal interrupt pending events.
4911 * Writing non-zero to intr-mbox-0 additionally tells the
4912 * NIC to stop sending us irqs, engaging "in-intr-handler"
4915 * Flush the mailbox to de-assert the IRQ immediately to prevent
4916 * spurious interrupts. The flush impacts performance but
4917 * excessive spurious interrupts can be worse in some cases.
4919 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4922 * In a shared interrupt configuration, sometimes other devices'
4923 * interrupts will scream. We record the current status tag here
4924 * so that the above check can report that the screaming interrupts
4925 * are unhandled. Eventually they will be silenced.
4927 tnapi->last_irq_tag = sblk->status_tag;
4929 if (tg3_irq_sync(tp))
4932 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4934 napi_schedule(&tnapi->napi);
4937 return IRQ_RETVAL(handled);
4940 /* ISR for interrupt test */
4941 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4943 struct tg3_napi *tnapi = dev_id;
4944 struct tg3 *tp = tnapi->tp;
4945 struct tg3_hw_status *sblk = tnapi->hw_status;
4947 if ((sblk->status & SD_STATUS_UPDATED) ||
4948 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4949 tg3_disable_ints(tp);
4950 return IRQ_RETVAL(1);
4952 return IRQ_RETVAL(0);
4955 static int tg3_init_hw(struct tg3 *, int);
4956 static int tg3_halt(struct tg3 *, int, int);
4958 /* Restart hardware after configuration changes, self-test, etc.
4959 * Invoked with tp->lock held.
4961 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4962 __releases(tp->lock)
4963 __acquires(tp->lock)
4967 err = tg3_init_hw(tp, reset_phy);
4969 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4970 "aborting.\n", tp->dev->name);
4971 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4972 tg3_full_unlock(tp);
4973 del_timer_sync(&tp->timer);
4975 tg3_napi_enable(tp);
4977 tg3_full_lock(tp, 0);
4982 #ifdef CONFIG_NET_POLL_CONTROLLER
4983 static void tg3_poll_controller(struct net_device *dev)
4986 struct tg3 *tp = netdev_priv(dev);
4988 for (i = 0; i < tp->irq_cnt; i++)
4989 tg3_interrupt(tp->napi[i].irq_vec, dev);
4993 static void tg3_reset_task(struct work_struct *work)
4995 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4997 unsigned int restart_timer;
4999 tg3_full_lock(tp, 0);
5001 if (!netif_running(tp->dev)) {
5002 tg3_full_unlock(tp);
5006 tg3_full_unlock(tp);
5012 tg3_full_lock(tp, 1);
5014 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5015 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5017 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5018 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5019 tp->write32_rx_mbox = tg3_write_flush_reg32;
5020 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5021 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5024 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5025 err = tg3_init_hw(tp, 1);
5029 tg3_netif_start(tp);
5032 mod_timer(&tp->timer, jiffies + 1);
5035 tg3_full_unlock(tp);
5041 static void tg3_dump_short_state(struct tg3 *tp)
5043 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5044 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5045 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5046 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5049 static void tg3_tx_timeout(struct net_device *dev)
5051 struct tg3 *tp = netdev_priv(dev);
5053 if (netif_msg_tx_err(tp)) {
5054 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5056 tg3_dump_short_state(tp);
5059 schedule_work(&tp->reset_task);
5062 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5063 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5065 u32 base = (u32) mapping & 0xffffffff;
5067 return ((base > 0xffffdcc0) &&
5068 (base + len + 8 < base));
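/* Worked example: with low 32 address bits base = 0xffffff00 and
 * len = 0x200, base + len + 8 wraps to 0x108, which is smaller than base,
 * so the buffer straddles a 4GB boundary and the test returns true.
 * The base > 0xffffdcc0 guard short-circuits the add-and-compare for
 * mappings that start further than about 9 KB (roughly a jumbo frame)
 * below the boundary and so cannot plausibly wrap.
 */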
5071 /* Test for DMA addresses > 40-bit */
5072 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5075 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5076 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5077 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5084 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5086 /* Work around 4GB and 40-bit hardware DMA bugs. */
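/* Sketch of the approach used below: the offending skb is copied into a
 * freshly allocated linear skb (with extra headroom on 5701 so the data
 * starts 4-byte aligned), remapped for DMA, and dropped entirely if the
 * new mapping still crosses a 4GB boundary; the software TX ring entries
 * that referenced the old skb are then rewritten to point at the copy.
 */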
5087 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5088 u32 last_plus_one, u32 *start,
5089 u32 base_flags, u32 mss)
5091 struct tg3_napi *tnapi = &tp->napi[0];
5092 struct sk_buff *new_skb;
5093 dma_addr_t new_addr = 0;
5097 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5098 new_skb = skb_copy(skb, GFP_ATOMIC);
5100 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5102 new_skb = skb_copy_expand(skb,
5103 skb_headroom(skb) + more_headroom,
5104 skb_tailroom(skb), GFP_ATOMIC);
5110 /* New SKB is guaranteed to be linear. */
5112 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
5113 new_addr = skb_shinfo(new_skb)->dma_head;
5115 /* Make sure new skb does not cross any 4G boundaries.
5116 * Drop the packet if it does.
5118 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
5120 skb_dma_unmap(&tp->pdev->dev, new_skb,
5123 dev_kfree_skb(new_skb);
5126 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5127 base_flags, 1 | (mss << 1));
5128 *start = NEXT_TX(entry);
5132 /* Now clean up the sw ring entries. */
5134 while (entry != last_plus_one) {
5136 tnapi->tx_buffers[entry].skb = new_skb;
5138 tnapi->tx_buffers[entry].skb = NULL;
5139 entry = NEXT_TX(entry);
5143 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5149 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5150 dma_addr_t mapping, int len, u32 flags,
5153 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5154 int is_end = (mss_and_is_end & 0x1);
5155 u32 mss = (mss_and_is_end >> 1);
5159 flags |= TXD_FLAG_END;
5160 if (flags & TXD_FLAG_VLAN) {
5161 vlan_tag = flags >> 16;
5164 vlan_tag |= (mss << TXD_MSS_SHIFT);
5166 txd->addr_hi = ((u64) mapping >> 32);
5167 txd->addr_lo = ((u64) mapping & 0xffffffff);
5168 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5169 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
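/* Note on the mss_and_is_end argument: bit 0 carries the "last descriptor
 * of this packet" flag and the remaining bits carry the MSS, i.e. callers
 * pass (is_last | (mss << 1)) and this function unpacks the two above.
 */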
5172 /* hard_start_xmit for devices that don't have any bugs and
5173 * support TG3_FLG2_HW_TSO_2 only.
5175 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5176 struct net_device *dev)
5178 struct tg3 *tp = netdev_priv(dev);
5179 u32 len, entry, base_flags, mss;
5180 struct skb_shared_info *sp;
5182 struct tg3_napi *tnapi;
5183 struct netdev_queue *txq;
5185 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5186 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5187 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5190 /* We are running in BH disabled context with netif_tx_lock
5191 * and TX reclaim runs via tp->napi.poll inside of a software
5192 * interrupt. Furthermore, IRQ processing runs lockless so we have
5193 * no IRQ context deadlocks to worry about either. Rejoice!
5195 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5196 if (!netif_tx_queue_stopped(txq)) {
5197 netif_tx_stop_queue(txq);
5199 /* This is a hard error, log it. */
5200 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5201 "queue awake!\n", dev->name);
5203 return NETDEV_TX_BUSY;
5206 entry = tnapi->tx_prod;
5209 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5210 int tcp_opt_len, ip_tcp_len;
5212 if (skb_header_cloned(skb) &&
5213 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5218 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5219 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
5221 struct iphdr *iph = ip_hdr(skb);
5223 tcp_opt_len = tcp_optlen(skb);
5224 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5227 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5228 mss |= (ip_tcp_len + tcp_opt_len) << 9;
5231 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5232 TXD_FLAG_CPU_POST_DMA);
5234 tcp_hdr(skb)->check = 0;
5237 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5238 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5239 #if TG3_VLAN_TAG_USED
5240 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5241 base_flags |= (TXD_FLAG_VLAN |
5242 (vlan_tx_tag_get(skb) << 16));
5245 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5250 sp = skb_shinfo(skb);
5252 mapping = sp->dma_head;
5254 tnapi->tx_buffers[entry].skb = skb;
5256 len = skb_headlen(skb);
5258 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5259 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5261 entry = NEXT_TX(entry);
5263 /* Now loop through additional data fragments, and queue them. */
5264 if (skb_shinfo(skb)->nr_frags > 0) {
5265 unsigned int i, last;
5267 last = skb_shinfo(skb)->nr_frags - 1;
5268 for (i = 0; i <= last; i++) {
5269 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5272 mapping = sp->dma_maps[i];
5273 tnapi->tx_buffers[entry].skb = NULL;
5275 tg3_set_txd(tnapi, entry, mapping, len,
5276 base_flags, (i == last) | (mss << 1));
5278 entry = NEXT_TX(entry);
5282 /* Packets are ready, update Tx producer idx local and on card. */
5283 tw32_tx_mbox(tnapi->prodmbox, entry);
5285 tnapi->tx_prod = entry;
5286 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5287 netif_tx_stop_queue(txq);
5288 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5289 netif_tx_wake_queue(txq);
5295 return NETDEV_TX_OK;
5298 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5299 struct net_device *);
5301 /* Use GSO to work around a rare TSO bug that may be triggered when the
5302 * TSO header is greater than 80 bytes.
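 * Roughly: the skb is re-segmented in software via skb_gso_segment() with
 * TSO masked out of the advertised features, and each resulting segment is
 * then queued through tg3_start_xmit_dma_bug() as an ordinary packet.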
5304 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5306 struct sk_buff *segs, *nskb;
5307 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5309 /* Estimate the number of fragments in the worst case */
5310 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5311 netif_stop_queue(tp->dev);
5312 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5313 return NETDEV_TX_BUSY;
5315 netif_wake_queue(tp->dev);
5318 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5320 goto tg3_tso_bug_end;
5326 tg3_start_xmit_dma_bug(nskb, tp->dev);
5332 return NETDEV_TX_OK;
5335 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5336 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5338 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5339 struct net_device *dev)
5341 struct tg3 *tp = netdev_priv(dev);
5342 u32 len, entry, base_flags, mss;
5343 struct skb_shared_info *sp;
5344 int would_hit_hwbug;
5346 struct tg3_napi *tnapi = &tp->napi[0];
5348 len = skb_headlen(skb);
5350 /* We are running in BH disabled context with netif_tx_lock
5351 * and TX reclaim runs via tp->napi.poll inside of a software
5352 * interrupt. Furthermore, IRQ processing runs lockless so we have
5353 * no IRQ context deadlocks to worry about either. Rejoice!
5355 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5356 if (!netif_queue_stopped(dev)) {
5357 netif_stop_queue(dev);
5359 /* This is a hard error, log it. */
5360 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5361 "queue awake!\n", dev->name);
5363 return NETDEV_TX_BUSY;
5366 entry = tnapi->tx_prod;
5368 if (skb->ip_summed == CHECKSUM_PARTIAL)
5369 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5371 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5373 int tcp_opt_len, ip_tcp_len, hdr_len;
5375 if (skb_header_cloned(skb) &&
5376 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5381 tcp_opt_len = tcp_optlen(skb);
5382 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5384 hdr_len = ip_tcp_len + tcp_opt_len;
5385 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5386 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5387 return (tg3_tso_bug(tp, skb));
5389 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5390 TXD_FLAG_CPU_POST_DMA);
5394 iph->tot_len = htons(mss + hdr_len);
5395 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5396 tcp_hdr(skb)->check = 0;
5397 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5399 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5404 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5405 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5406 if (tcp_opt_len || iph->ihl > 5) {
5409 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5410 mss |= (tsflags << 11);
5413 if (tcp_opt_len || iph->ihl > 5) {
5416 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5417 base_flags |= tsflags << 12;
5421 #if TG3_VLAN_TAG_USED
5422 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5423 base_flags |= (TXD_FLAG_VLAN |
5424 (vlan_tx_tag_get(skb) << 16));
5427 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5432 sp = skb_shinfo(skb);
5434 mapping = sp->dma_head;
5436 tnapi->tx_buffers[entry].skb = skb;
5438 would_hit_hwbug = 0;
5440 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5441 would_hit_hwbug = 1;
5442 else if (tg3_4g_overflow_test(mapping, len))
5443 would_hit_hwbug = 1;
5445 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5446 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5448 entry = NEXT_TX(entry);
5450 /* Now loop through additional data fragments, and queue them. */
5451 if (skb_shinfo(skb)->nr_frags > 0) {
5452 unsigned int i, last;
5454 last = skb_shinfo(skb)->nr_frags - 1;
5455 for (i = 0; i <= last; i++) {
5456 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5459 mapping = sp->dma_maps[i];
5461 tnapi->tx_buffers[entry].skb = NULL;
5463 if (tg3_4g_overflow_test(mapping, len))
5464 would_hit_hwbug = 1;
5466 if (tg3_40bit_overflow_test(tp, mapping, len))
5467 would_hit_hwbug = 1;
5469 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5470 tg3_set_txd(tnapi, entry, mapping, len,
5471 base_flags, (i == last)|(mss << 1));
5473 tg3_set_txd(tnapi, entry, mapping, len,
5474 base_flags, (i == last));
5476 entry = NEXT_TX(entry);
5480 if (would_hit_hwbug) {
5481 u32 last_plus_one = entry;
5484 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5485 start &= (TG3_TX_RING_SIZE - 1);
5487 /* If the workaround fails due to memory/mapping
5488 * failure, silently drop this packet.
5490 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5491 &start, base_flags, mss))
5497 /* Packets are ready, update Tx producer idx local and on card. */
5498 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
5500 tnapi->tx_prod = entry;
5501 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5502 netif_stop_queue(dev);
5503 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5504 netif_wake_queue(tp->dev);
5510 return NETDEV_TX_OK;
5513 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5518 if (new_mtu > ETH_DATA_LEN) {
5519 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5520 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5521 ethtool_op_set_tso(dev, 0);
5524 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5526 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5527 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5528 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5532 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5534 struct tg3 *tp = netdev_priv(dev);
5537 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5540 if (!netif_running(dev)) {
5541 /* We'll just catch it later when the device is brought up. */
5544 tg3_set_mtu(dev, tp, new_mtu);
5552 tg3_full_lock(tp, 1);
5554 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5556 tg3_set_mtu(dev, tp, new_mtu);
5558 err = tg3_restart_hw(tp, 0);
5561 tg3_netif_start(tp);
5563 tg3_full_unlock(tp);
5571 static void tg3_rx_prodring_free(struct tg3 *tp,
5572 struct tg3_rx_prodring_set *tpr)
5575 struct ring_info *rxp;
5577 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5578 rxp = &tpr->rx_std_buffers[i];
5580 if (rxp->skb == NULL)
5583 pci_unmap_single(tp->pdev,
5584 pci_unmap_addr(rxp, mapping),
5586 PCI_DMA_FROMDEVICE);
5587 dev_kfree_skb_any(rxp->skb);
5591 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5592 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5593 rxp = &tpr->rx_jmb_buffers[i];
5595 if (rxp->skb == NULL)
5598 pci_unmap_single(tp->pdev,
5599 pci_unmap_addr(rxp, mapping),
5601 PCI_DMA_FROMDEVICE);
5602 dev_kfree_skb_any(rxp->skb);
5608 /* Initialize rx rings for packet processing.
5610 * The chip has been shut down and the driver detached from
5611 * the networking, so no interrupts or new tx packets will
5612 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
5615 static int tg3_rx_prodring_alloc(struct tg3 *tp,
5616 struct tg3_rx_prodring_set *tpr)
5618 u32 i, rx_pkt_dma_sz;
5619 struct tg3_napi *tnapi = &tp->napi[0];
5621 /* Zero out all descriptors. */
5622 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5624 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5625 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5626 tp->dev->mtu > ETH_DATA_LEN)
5627 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
5628 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
5630 /* Initialize invariants of the rings, we only set this
5631 * stuff once. This works because the card does not
5632 * write into the rx buffer posting rings.
5634 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5635 struct tg3_rx_buffer_desc *rxd;
5637 rxd = &tpr->rx_std[i];
5638 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
5639 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5640 rxd->opaque = (RXD_OPAQUE_RING_STD |
5641 (i << RXD_OPAQUE_INDEX_SHIFT));
5644 /* Now allocate fresh SKBs for each rx ring. */
5645 for (i = 0; i < tp->rx_pending; i++) {
5646 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5647 printk(KERN_WARNING PFX
5648 "%s: Using a smaller RX standard ring, "
5649 "only %d out of %d buffers were allocated "
5651 tp->dev->name, i, tp->rx_pending);
5659 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
5662 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
5664 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5665 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5666 struct tg3_rx_buffer_desc *rxd;
5668 rxd = &tpr->rx_jmb[i].std;
5669 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
5670 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5672 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5673 (i << RXD_OPAQUE_INDEX_SHIFT));
5676 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5677 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
5679 printk(KERN_WARNING PFX
5680 "%s: Using a smaller RX jumbo ring, "
5681 "only %d out of %d buffers were "
5682 "allocated successfully.\n",
5683 tp->dev->name, i, tp->rx_jumbo_pending);
5686 tp->rx_jumbo_pending = i;
5696 tg3_rx_prodring_free(tp, tpr);
5700 static void tg3_rx_prodring_fini(struct tg3 *tp,
5701 struct tg3_rx_prodring_set *tpr)
5703 kfree(tpr->rx_std_buffers);
5704 tpr->rx_std_buffers = NULL;
5705 kfree(tpr->rx_jmb_buffers);
5706 tpr->rx_jmb_buffers = NULL;
5708 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5709 tpr->rx_std, tpr->rx_std_mapping);
5713 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5714 tpr->rx_jmb, tpr->rx_jmb_mapping);
5719 static int tg3_rx_prodring_init(struct tg3 *tp,
5720 struct tg3_rx_prodring_set *tpr)
5722 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
5723 TG3_RX_RING_SIZE, GFP_KERNEL);
5724 if (!tpr->rx_std_buffers)
5727 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5728 &tpr->rx_std_mapping);
5732 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5733 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
5734 TG3_RX_JUMBO_RING_SIZE,
5736 if (!tpr->rx_jmb_buffers)
5739 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
5740 TG3_RX_JUMBO_RING_BYTES,
5741 &tpr->rx_jmb_mapping);
5749 tg3_rx_prodring_fini(tp, tpr);
5753 /* Free up pending packets in all rx/tx rings.
5755 * The chip has been shut down and the driver detached from
5756 * the networking, so no interrupts or new tx packets will
5757 * end up in the driver. tp->{tx,}lock is not held and we are not
5758 * in an interrupt context and thus may sleep.
5760 static void tg3_free_rings(struct tg3 *tp)
5764 for (j = 0; j < tp->irq_cnt; j++) {
5765 struct tg3_napi *tnapi = &tp->napi[j];
5767 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5768 struct tx_ring_info *txp;
5769 struct sk_buff *skb;
5771 txp = &tnapi->tx_buffers[i];
5779 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5783 i += skb_shinfo(skb)->nr_frags + 1;
5785 dev_kfree_skb_any(skb);
5789 tg3_rx_prodring_free(tp, &tp->prodring[0]);
5792 /* Initialize tx/rx rings for packet processing.
5794 * The chip has been shut down and the driver detached from
5795 * the networking, so no interrupts or new tx packets will
5796 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
5799 static int tg3_init_rings(struct tg3 *tp)
5803 /* Free up all the SKBs. */
5806 for (i = 0; i < tp->irq_cnt; i++) {
5807 struct tg3_napi *tnapi = &tp->napi[i];
5809 tnapi->last_tag = 0;
5810 tnapi->last_irq_tag = 0;
5811 tnapi->hw_status->status = 0;
5812 tnapi->hw_status->status_tag = 0;
5813 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5817 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
5819 tnapi->rx_rcb_ptr = 0;
5820 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5823 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
5827 * Must not be invoked with interrupt sources disabled and
5828 * the hardware shut down.
5830 static void tg3_free_consistent(struct tg3 *tp)
5834 for (i = 0; i < tp->irq_cnt; i++) {
5835 struct tg3_napi *tnapi = &tp->napi[i];
5837 if (tnapi->tx_ring) {
5838 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5839 tnapi->tx_ring, tnapi->tx_desc_mapping);
5840 tnapi->tx_ring = NULL;
5843 kfree(tnapi->tx_buffers);
5844 tnapi->tx_buffers = NULL;
5846 if (tnapi->rx_rcb) {
5847 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5849 tnapi->rx_rcb_mapping);
5850 tnapi->rx_rcb = NULL;
5853 if (tnapi->hw_status) {
5854 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5856 tnapi->status_mapping);
5857 tnapi->hw_status = NULL;
5862 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5863 tp->hw_stats, tp->stats_mapping);
5864 tp->hw_stats = NULL;
5867 tg3_rx_prodring_fini(tp, &tp->prodring[0]);
5871 * Must not be invoked with interrupt sources disabled and
5872 * the hardware shut down. Can sleep.
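/* A rough map of what is allocated below, per interrupt vector (tnapi):
 * a TG3_HW_STATUS_SIZE status block, an RX return (RCB) ring of
 * TG3_RX_RCB_RING_BYTES(tp), a tx_buffers shadow array and a TX descriptor
 * ring, plus one shared hardware statistics block and the standard/jumbo
 * producer rings set up in tg3_rx_prodring_init().
 */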
5874 static int tg3_alloc_consistent(struct tg3 *tp)
5878 if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
5881 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5882 sizeof(struct tg3_hw_stats),
5883 &tp->stats_mapping);
5887 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5889 for (i = 0; i < tp->irq_cnt; i++) {
5890 struct tg3_napi *tnapi = &tp->napi[i];
5892 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
5894 &tnapi->status_mapping);
5895 if (!tnapi->hw_status)
5898 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5900 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
5901 TG3_RX_RCB_RING_BYTES(tp),
5902 &tnapi->rx_rcb_mapping);
5906 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5908 tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
5909 TG3_TX_RING_SIZE, GFP_KERNEL);
5910 if (!tnapi->tx_buffers)
5913 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
5915 &tnapi->tx_desc_mapping);
5916 if (!tnapi->tx_ring)
5923 tg3_free_consistent(tp);
5927 #define MAX_WAIT_CNT 1000
5929 /* To stop a block, clear the enable bit and poll till it
5930 * clears. tp->lock is held.
5932 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5937 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5944 /* We can't enable/disable these bits of the
5945 * 5705/5750, just say success.
5958 for (i = 0; i < MAX_WAIT_CNT; i++) {
5961 if ((val & enable_bit) == 0)
5965 if (i == MAX_WAIT_CNT && !silent) {
5966 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5967 "ofs=%lx enable_bit=%x\n",
5975 /* tp->lock is held. */
5976 static int tg3_abort_hw(struct tg3 *tp, int silent)
5980 tg3_disable_ints(tp);
5982 tp->rx_mode &= ~RX_MODE_ENABLE;
5983 tw32_f(MAC_RX_MODE, tp->rx_mode);
5986 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5987 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5988 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5989 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5990 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5991 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5993 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5994 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5995 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5996 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5997 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5998 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5999 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6001 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6002 tw32_f(MAC_MODE, tp->mac_mode);
6005 tp->tx_mode &= ~TX_MODE_ENABLE;
6006 tw32_f(MAC_TX_MODE, tp->tx_mode);
6008 for (i = 0; i < MAX_WAIT_CNT; i++) {
6010 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6013 if (i >= MAX_WAIT_CNT) {
6014 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
6015 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
6016 tp->dev->name, tr32(MAC_TX_MODE));
6020 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6021 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6022 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6024 tw32(FTQ_RESET, 0xffffffff);
6025 tw32(FTQ_RESET, 0x00000000);
6027 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6028 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6030 for (i = 0; i < tp->irq_cnt; i++) {
6031 struct tg3_napi *tnapi = &tp->napi[i];
6032 if (tnapi->hw_status)
6033 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6036 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6041 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6046 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6047 if (apedata != APE_SEG_SIG_MAGIC)
6050 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6051 if (!(apedata & APE_FW_STATUS_READY))
6054 /* Wait for up to 1 millisecond for APE to service previous event. */
6055 for (i = 0; i < 10; i++) {
6056 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6059 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6061 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6062 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6063 event | APE_EVENT_STATUS_EVENT_PENDING);
6065 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6067 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6073 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6074 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6077 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6082 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6086 case RESET_KIND_INIT:
6087 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6088 APE_HOST_SEG_SIG_MAGIC);
6089 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6090 APE_HOST_SEG_LEN_MAGIC);
6091 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6092 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6093 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6094 APE_HOST_DRIVER_ID_MAGIC);
6095 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6096 APE_HOST_BEHAV_NO_PHYLOCK);
6098 event = APE_EVENT_STATUS_STATE_START;
6100 case RESET_KIND_SHUTDOWN:
6101 /* With the interface we are currently using,
6102 * APE does not track driver state. Wiping
6103 * out the HOST SEGMENT SIGNATURE forces
6104 * the APE to assume OS absent status.
6106 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6108 event = APE_EVENT_STATUS_STATE_UNLOAD;
6110 case RESET_KIND_SUSPEND:
6111 event = APE_EVENT_STATUS_STATE_SUSPEND;
6117 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6119 tg3_ape_send_event(tp, event);
6122 /* tp->lock is held. */
6123 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6125 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6126 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6128 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6130 case RESET_KIND_INIT:
6131 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6135 case RESET_KIND_SHUTDOWN:
6136 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6140 case RESET_KIND_SUSPEND:
6141 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6150 if (kind == RESET_KIND_INIT ||
6151 kind == RESET_KIND_SUSPEND)
6152 tg3_ape_driver_state_change(tp, kind);
6155 /* tp->lock is held. */
6156 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6158 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6160 case RESET_KIND_INIT:
6161 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6162 DRV_STATE_START_DONE);
6165 case RESET_KIND_SHUTDOWN:
6166 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6167 DRV_STATE_UNLOAD_DONE);
6175 if (kind == RESET_KIND_SHUTDOWN)
6176 tg3_ape_driver_state_change(tp, kind);
6179 /* tp->lock is held. */
6180 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6182 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6184 case RESET_KIND_INIT:
6185 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6189 case RESET_KIND_SHUTDOWN:
6190 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6194 case RESET_KIND_SUSPEND:
6195 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6205 static int tg3_poll_fw(struct tg3 *tp)
6210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6211 /* Wait up to 20ms for init done. */
6212 for (i = 0; i < 200; i++) {
6213 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6220 /* Wait for firmware initialization to complete. */
6221 for (i = 0; i < 100000; i++) {
6222 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6223 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6228 /* Chip might not be fitted with firmware. Some Sun onboard
6229 * parts are configured like that. So don't signal the timeout
6230 * of the above loop as an error, but do report the lack of
6231 * running firmware once.
6234 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6235 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6237 printk(KERN_INFO PFX "%s: No firmware running.\n",
6244 /* Save PCI command register before chip reset */
6245 static void tg3_save_pci_state(struct tg3 *tp)
6247 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6250 /* Restore PCI state after chip reset */
6251 static void tg3_restore_pci_state(struct tg3 *tp)
6255 /* Re-enable indirect register accesses. */
6256 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6257 tp->misc_host_ctrl);
6259 /* Set MAX PCI retry to zero. */
6260 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6261 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6262 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6263 val |= PCISTATE_RETRY_SAME_DMA;
6264 /* Allow reads and writes to the APE register and memory space. */
6265 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6266 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6267 PCISTATE_ALLOW_APE_SHMEM_WR;
6268 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6270 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6272 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6273 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6274 pcie_set_readrq(tp->pdev, 4096);
6276 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6277 tp->pci_cacheline_sz);
6278 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6283 /* Make sure PCI-X relaxed ordering bit is clear. */
6284 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6287 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6289 pcix_cmd &= ~PCI_X_CMD_ERO;
6290 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6294 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6296 /* Chip reset on 5780 will reset MSI enable bit,
6297 * so we need to restore it.
6299 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6302 pci_read_config_word(tp->pdev,
6303 tp->msi_cap + PCI_MSI_FLAGS,
6305 pci_write_config_word(tp->pdev,
6306 tp->msi_cap + PCI_MSI_FLAGS,
6307 ctrl | PCI_MSI_FLAGS_ENABLE);
6308 val = tr32(MSGINT_MODE);
6309 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6314 static void tg3_stop_fw(struct tg3 *);
6316 /* tp->lock is held. */
6317 static int tg3_chip_reset(struct tg3 *tp)
6320 void (*write_op)(struct tg3 *, u32, u32);
6327 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6329 /* No matching tg3_nvram_unlock() after this because
6330 * chip reset below will undo the nvram lock.
6332 tp->nvram_lock_cnt = 0;
6334 /* GRC_MISC_CFG core clock reset will clear the memory
6335 * enable bit in PCI register 4 and the MSI enable bit
6336 * on some chips, so we save relevant registers here.
6338 tg3_save_pci_state(tp);
6340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6341 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6342 tw32(GRC_FASTBOOT_PC, 0);
6345 * We must avoid the readl() that normally takes place.
6346 * It locks machines, causes machine checks, and other
6347 * fun things. So, temporarily disable the 5701
6348 * hardware workaround while we do the reset.
6350 write_op = tp->write32;
6351 if (write_op == tg3_write_flush_reg32)
6352 tp->write32 = tg3_write32;
6354 /* Prevent the irq handler from reading or writing PCI registers
6355 * during chip reset when the memory enable bit in the PCI command
6356 * register may be cleared. The chip does not generate interrupt
6357 * at this time, but the irq handler may still be called due to irq
6358 * sharing or irqpoll.
6360 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6361 for (i = 0; i < tp->irq_cnt; i++) {
6362 struct tg3_napi *tnapi = &tp->napi[i];
6363 if (tnapi->hw_status) {
6364 tnapi->hw_status->status = 0;
6365 tnapi->hw_status->status_tag = 0;
6367 tnapi->last_tag = 0;
6368 tnapi->last_irq_tag = 0;
6372 for (i = 0; i < tp->irq_cnt; i++)
6373 synchronize_irq(tp->napi[i].irq_vec);
6375 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6376 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6377 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6381 val = GRC_MISC_CFG_CORECLK_RESET;
6383 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6384 if (tr32(0x7e2c) == 0x60) {
6387 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6388 tw32(GRC_MISC_CFG, (1 << 29));
6393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6394 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6395 tw32(GRC_VCPU_EXT_CTRL,
6396 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6399 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6400 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6401 tw32(GRC_MISC_CFG, val);
6403 /* restore 5701 hardware bug workaround write method */
6404 tp->write32 = write_op;
6406 /* Unfortunately, we have to delay before the PCI read back.
6407 * Some 575X chips will not even respond to a PCI cfg access
6408 * when the reset command is given to the chip.
6410 * How do these hardware designers expect things to work
6411 * properly if the PCI write is posted for a long period
6412 * of time? It is always necessary to have some method by
6413 * which a register read back can occur to push out the
6414 * write that performs the reset.
6416 * For most tg3 variants the trick below was working.
6421 /* Flush PCI posted writes. The normal MMIO registers
6422 * are inaccessible at this time so this is the only
6423 * way to do this reliably (actually, this is no longer
6424 * the case, see above). I tried to use indirect
6425 * register read/write but this upset some 5701 variants.
6427 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6431 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6434 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6438 /* Wait for link training to complete. */
6439 for (i = 0; i < 5000; i++)
6442 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6443 pci_write_config_dword(tp->pdev, 0xc4,
6444 cfg_val | (1 << 15));
6447 /* Clear the "no snoop" and "relaxed ordering" bits. */
6448 pci_read_config_word(tp->pdev,
6449 tp->pcie_cap + PCI_EXP_DEVCTL,
6451 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6452 PCI_EXP_DEVCTL_NOSNOOP_EN);
6454 * Older PCIe devices only support the 128 byte
6455 * MPS setting. Enforce the restriction.
6457 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6458 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6459 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6460 pci_write_config_word(tp->pdev,
6461 tp->pcie_cap + PCI_EXP_DEVCTL,
6464 pcie_set_readrq(tp->pdev, 4096);
6466 /* Clear error status */
6467 pci_write_config_word(tp->pdev,
6468 tp->pcie_cap + PCI_EXP_DEVSTA,
6469 PCI_EXP_DEVSTA_CED |
6470 PCI_EXP_DEVSTA_NFED |
6471 PCI_EXP_DEVSTA_FED |
6472 PCI_EXP_DEVSTA_URD);
6475 tg3_restore_pci_state(tp);
6477 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6480 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6481 val = tr32(MEMARB_MODE);
6482 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6484 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6486 tw32(0x5000, 0x400);
6489 tw32(GRC_MODE, tp->grc_mode);
6491 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6494 tw32(0xc4, val | (1 << 15));
6497 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6498 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6499 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6500 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6501 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6502 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6505 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6506 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6507 tw32_f(MAC_MODE, tp->mac_mode);
6508 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6509 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6510 tw32_f(MAC_MODE, tp->mac_mode);
6511 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6512 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6513 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6514 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6515 tw32_f(MAC_MODE, tp->mac_mode);
6517 tw32_f(MAC_MODE, 0);
6520 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6522 err = tg3_poll_fw(tp);
6528 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6529 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6532 tw32(0x7c00, val | (1 << 25));
6535 /* Reprobe ASF enable state. */
6536 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6537 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6538 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6539 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6542 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6543 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6544 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6545 tp->last_event_jiffies = jiffies;
6546 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6547 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6554 /* tp->lock is held. */
6555 static void tg3_stop_fw(struct tg3 *tp)
6557 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6558 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6559 /* Wait for RX cpu to ACK the previous event. */
6560 tg3_wait_for_event_ack(tp);
6562 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6564 tg3_generate_fw_event(tp);
6566 /* Wait for RX cpu to ACK this event. */
6567 tg3_wait_for_event_ack(tp);
6571 /* tp->lock is held. */
6572 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6578 tg3_write_sig_pre_reset(tp, kind);
6580 tg3_abort_hw(tp, silent);
6581 err = tg3_chip_reset(tp);
6583 __tg3_set_mac_addr(tp, 0);
6585 tg3_write_sig_legacy(tp, kind);
6586 tg3_write_sig_post_reset(tp, kind);
6594 #define RX_CPU_SCRATCH_BASE 0x30000
6595 #define RX_CPU_SCRATCH_SIZE 0x04000
6596 #define TX_CPU_SCRATCH_BASE 0x34000
6597 #define TX_CPU_SCRATCH_SIZE 0x04000
6599 /* tp->lock is held. */
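/* Sketch of the halt sequence below: the 5906 has no conventional RX/TX
 * CPUs, so its VCPU is halted via GRC_VCPU_EXT_CTRL; for other chips the
 * loop writes 0xffffffff to CPU_STATE and CPU_MODE_HALT to CPU_MODE and
 * polls CPU_MODE until the halt bit reads back, then the firmware's NVRAM
 * arbitration request is cleared.
 */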
6600 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6604 BUG_ON(offset == TX_CPU_BASE &&
6605 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6608 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6610 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6613 if (offset == RX_CPU_BASE) {
6614 for (i = 0; i < 10000; i++) {
6615 tw32(offset + CPU_STATE, 0xffffffff);
6616 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6617 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6621 tw32(offset + CPU_STATE, 0xffffffff);
6622 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6625 for (i = 0; i < 10000; i++) {
6626 tw32(offset + CPU_STATE, 0xffffffff);
6627 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6628 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6634 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6637 (offset == RX_CPU_BASE ? "RX" : "TX"));
6641 /* Clear firmware's nvram arbitration. */
6642 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6643 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6648 unsigned int fw_base;
6649 unsigned int fw_len;
6650 const __be32 *fw_data;
6653 /* tp->lock is held. */
6654 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6655 int cpu_scratch_size, struct fw_info *info)
6657 int err, lock_err, i;
6658 void (*write_op)(struct tg3 *, u32, u32);
6660 if (cpu_base == TX_CPU_BASE &&
6661 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6662 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6663 "TX cpu firmware on %s which is 5705.\n",
6668 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6669 write_op = tg3_write_mem;
6671 write_op = tg3_write_indirect_reg32;
6673 /* It is possible that bootcode is still loading at this point.
6674 * Get the nvram lock first before halting the cpu.
6676 lock_err = tg3_nvram_lock(tp);
6677 err = tg3_halt_cpu(tp, cpu_base);
6679 tg3_nvram_unlock(tp);
6683 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6684 write_op(tp, cpu_scratch_base + i, 0);
6685 tw32(cpu_base + CPU_STATE, 0xffffffff);
6686 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6687 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
6688 write_op(tp, (cpu_scratch_base +
6689 (info->fw_base & 0xffff) +
6691 be32_to_cpu(info->fw_data[i]));
6699 /* tp->lock is held. */
6700 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6702 struct fw_info info;
6703 const __be32 *fw_data;
6706 fw_data = (void *)tp->fw->data;
6708 /* Firmware blob starts with version numbers, followed by
6709 start address and length. We are setting complete length.
6710 length = end_address_of_bss - start_address_of_text.
6711 Remainder is the blob to be loaded contiguously
6712 from start address. */
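/* Put differently, the blob is consumed below as 32-bit big-endian words:
 * word 0 is the version, word 1 the load address (info.fw_base), word 2
 * the advertised length (the code instead loads tp->fw->size - 12 bytes),
 * and everything from word 3 onward is the image copied into the CPU
 * scratch area.
 */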
6714 info.fw_base = be32_to_cpu(fw_data[1]);
6715 info.fw_len = tp->fw->size - 12;
6716 info.fw_data = &fw_data[3];
6718 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6719 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6724 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6725 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6730 /* Now startup only the RX cpu. */
6731 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6732 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6734 for (i = 0; i < 5; i++) {
6735 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
6737 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6738 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6739 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6743 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6744 "to set RX CPU PC, is %08x should be %08x\n",
6745 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6749 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6750 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6755 /* 5705 needs a special version of the TSO firmware. */
6757 /* tp->lock is held. */
6758 static int tg3_load_tso_firmware(struct tg3 *tp)
6760 struct fw_info info;
6761 const __be32 *fw_data;
6762 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6765 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6768 fw_data = (void *)tp->fw->data;
6770 /* Firmware blob starts with version numbers, followed by
6771 start address and length. We are setting complete length.
6772 length = end_address_of_bss - start_address_of_text.
6773 Remainder is the blob to be loaded contiguously
6774 from start address. */
6776 info.fw_base = be32_to_cpu(fw_data[1]);
6777 cpu_scratch_size = tp->fw_len;
6778 info.fw_len = tp->fw->size - 12;
6779 info.fw_data = &fw_data[3];
6781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6782 cpu_base = RX_CPU_BASE;
6783 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6785 cpu_base = TX_CPU_BASE;
6786 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6787 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6790 err = tg3_load_firmware_cpu(tp, cpu_base,
6791 cpu_scratch_base, cpu_scratch_size,
6796 /* Now startup the cpu. */
6797 tw32(cpu_base + CPU_STATE, 0xffffffff);
6798 tw32_f(cpu_base + CPU_PC, info.fw_base);
6800 for (i = 0; i < 5; i++) {
6801 if (tr32(cpu_base + CPU_PC) == info.fw_base)
6803 tw32(cpu_base + CPU_STATE, 0xffffffff);
6804 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6805 tw32_f(cpu_base + CPU_PC, info.fw_base);
6809 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6810 "to set CPU PC, is %08x should be %08x\n",
6811 tp->dev->name, tr32(cpu_base + CPU_PC),
6815 tw32(cpu_base + CPU_STATE, 0xffffffff);
6816 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6821 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6823 struct tg3 *tp = netdev_priv(dev);
6824 struct sockaddr *addr = p;
6825 int err = 0, skip_mac_1 = 0;
6827 if (!is_valid_ether_addr(addr->sa_data))
6830 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6832 if (!netif_running(dev))
6835 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6836 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6838 addr0_high = tr32(MAC_ADDR_0_HIGH);
6839 addr0_low = tr32(MAC_ADDR_0_LOW);
6840 addr1_high = tr32(MAC_ADDR_1_HIGH);
6841 addr1_low = tr32(MAC_ADDR_1_LOW);
6843 /* Skip MAC addr 1 if ASF is using it. */
6844 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6845 !(addr1_high == 0 && addr1_low == 0))
6848 spin_lock_bh(&tp->lock);
6849 __tg3_set_mac_addr(tp, skip_mac_1);
6850 spin_unlock_bh(&tp->lock);
6855 /* tp->lock is held. */
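/* tg3_set_bdinfo() below programs one TG3_BDINFO entry in NIC SRAM: the
 * 64-bit host ring address (high and low halves), the maxlen/flags word,
 * and, on chips that are not 5705-plus, the NIC-local ring address.
 */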
6856 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6857 dma_addr_t mapping, u32 maxlen_flags,
6861 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6862 ((u64) mapping >> 32));
6864 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6865 ((u64) mapping & 0xffffffff));
6867 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6870 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6872 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6876 static void __tg3_set_rx_mode(struct net_device *);
6877 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6879 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6880 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6881 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6882 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6883 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6884 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6885 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6887 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6888 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6889 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6890 u32 val = ec->stats_block_coalesce_usecs;
6892 if (!netif_carrier_ok(tp->dev))
6895 tw32(HOSTCC_STAT_COAL_TICKS, val);
6899 /* tp->lock is held. */
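/* Rough order of operations in tg3_rings_reset() below: disable every send
 * and receive-return ring except the first via their BDINFO flags, write 1
 * to the interrupt mailboxes and zero the producer/consumer mailboxes,
 * point the status block DMA address(es) at tnapi->status_mapping, and
 * then re-program BDINFO entries for the TX ring and RX return ring of
 * each active vector.
 */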
6900 static void tg3_rings_reset(struct tg3 *tp)
6903 u32 stblk, txrcb, rxrcb, limit;
6904 struct tg3_napi *tnapi = &tp->napi[0];
6906 /* Disable all transmit rings but the first. */
6907 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6908 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
6910 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
6912 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
6913 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
6914 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
6915 BDINFO_FLAGS_DISABLED);
6918 /* Disable all receive return rings but the first. */
6919 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6920 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
6921 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6922 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
6924 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
6926 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
6927 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
6928 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
6929 BDINFO_FLAGS_DISABLED);
6931 /* Disable interrupts */
6932 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
6934 /* Zero mailbox registers. */
6935 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
6936 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
6937 tp->napi[i].tx_prod = 0;
6938 tp->napi[i].tx_cons = 0;
6939 tw32_mailbox(tp->napi[i].prodmbox, 0);
6940 tw32_rx_mbox(tp->napi[i].consmbox, 0);
6941 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
6944 tp->napi[0].tx_prod = 0;
6945 tp->napi[0].tx_cons = 0;
6946 tw32_mailbox(tp->napi[0].prodmbox, 0);
6947 tw32_rx_mbox(tp->napi[0].consmbox, 0);
6950 /* Make sure the NIC-based send BD rings are disabled. */
6951 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6952 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
6953 for (i = 0; i < 16; i++)
6954 tw32_tx_mbox(mbox + i * 8, 0);
6957 txrcb = NIC_SRAM_SEND_RCB;
6958 rxrcb = NIC_SRAM_RCV_RET_RCB;
6960 /* Clear status block in ram. */
6961 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6963 /* Set status block DMA address */
6964 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6965 ((u64) tnapi->status_mapping >> 32));
6966 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6967 ((u64) tnapi->status_mapping & 0xffffffff));
6969 if (tnapi->tx_ring) {
6970 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
6971 (TG3_TX_RING_SIZE <<
6972 BDINFO_FLAGS_MAXLEN_SHIFT),
6973 NIC_SRAM_TX_BUFFER_DESC);
6974 txrcb += TG3_BDINFO_SIZE;
6977 if (tnapi->rx_rcb) {
6978 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
6979 (TG3_RX_RCB_RING_SIZE(tp) <<
6980 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
6981 rxrcb += TG3_BDINFO_SIZE;
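/* Program the status block address and the send/receive return ring
 * control blocks for each additional MSI-X vector; vector 0 was
 * handled above.
 */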
6984 stblk = HOSTCC_STATBLCK_RING1;
6986 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
6987 u64 mapping = (u64)tnapi->status_mapping;
6988 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
6989 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
6991 /* Clear status block in ram. */
6992 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6994 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
6995 (TG3_TX_RING_SIZE <<
6996 BDINFO_FLAGS_MAXLEN_SHIFT),
6997 NIC_SRAM_TX_BUFFER_DESC);
6999 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7000 (TG3_RX_RCB_RING_SIZE(tp) <<
7001 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7004 txrcb += TG3_BDINFO_SIZE;
7005 rxrcb += TG3_BDINFO_SIZE;
7009 /* tp->lock is held. */
7010 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7012 u32 val, rdmac_mode;
7014 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7016 tg3_disable_ints(tp);
7020 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7022 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7023 tg3_abort_hw(tp, 1);
7027 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7030 err = tg3_chip_reset(tp);
7034 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7036 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7037 val = tr32(TG3_CPMU_CTRL);
7038 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7039 tw32(TG3_CPMU_CTRL, val);
7041 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7042 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7043 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7044 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7046 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7047 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7048 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7049 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7051 val = tr32(TG3_CPMU_HST_ACC);
7052 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7053 val |= CPMU_HST_ACC_MACCLK_6_25;
7054 tw32(TG3_CPMU_HST_ACC, val);
7057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7058 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7059 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7060 PCIE_PWR_MGMT_L1_THRESH_4MS;
7061 tw32(PCIE_PWR_MGMT_THRESH, val);
7063 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7064 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7066 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7069 if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
7070 val = tr32(TG3_PCIE_LNKCTL);
7071 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
7072 val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
7074 val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
7075 tw32(TG3_PCIE_LNKCTL, val);
7078 /* This works around an issue with Athlon chipsets on
7079 * B3 tigon3 silicon. This bit has no effect on any
7080 * other revision. But do not set this on PCI Express
7081 * chips and don't even touch the clocks if the CPMU is present.
7083 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7084 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7085 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7086 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7089 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7090 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7091 val = tr32(TG3PCI_PCISTATE);
7092 val |= PCISTATE_RETRY_SAME_DMA;
7093 tw32(TG3PCI_PCISTATE, val);
7096 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7097 /* Allow reads and writes to the
7098 * APE register and memory space.
7100 val = tr32(TG3PCI_PCISTATE);
7101 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7102 PCISTATE_ALLOW_APE_SHMEM_WR;
7103 tw32(TG3PCI_PCISTATE, val);
7106 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7107 /* Enable some hw fixes. */
7108 val = tr32(TG3PCI_MSI_DATA);
7109 val |= (1 << 26) | (1 << 28) | (1 << 29);
7110 tw32(TG3PCI_MSI_DATA, val);
7113 /* Descriptor ring init may make accesses to the
7114 * NIC SRAM area to setup the TX descriptors, so we
7115 * can only do this after the hardware has been
7116 * successfully reset.
7118 err = tg3_init_rings(tp);
7122 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7123 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7124 /* This value is determined during the probe time DMA
7125 * engine test, tg3_test_dma.
7127 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7130 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7131 GRC_MODE_4X_NIC_SEND_RINGS |
7132 GRC_MODE_NO_TX_PHDR_CSUM |
7133 GRC_MODE_NO_RX_PHDR_CSUM);
7134 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7136 /* Pseudo-header checksum is done by hardware logic and not
7137 * the offload processors, so make the chip do the pseudo-
7138 * header checksums on receive. For transmit it is more
7139 * convenient to do the pseudo-header checksum in software
7140 * as Linux does that on transmit for us in all cases.
7142 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7146 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7148 /* Setup the timer prescaler register. Clock is always 66 MHz. */
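/* A prescaler value of 65 is assumed to divide the 66 MHz core clock
 * by value + 1, giving a 1 MHz (1 usec) timer tick.
 */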
7149 val = tr32(GRC_MISC_CFG);
7151 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7152 tw32(GRC_MISC_CFG, val);
7154 /* Initialize MBUF/DESC pool. */
7155 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7157 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7158 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7160 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7162 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7163 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7164 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7166 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7169 fw_len = tp->fw_len;
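/* Round the TSO firmware footprint up to the next 128-byte boundary
 * and carve it out of the bottom of the 5705 MBUF pool.
 */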
7170 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7171 tw32(BUFMGR_MB_POOL_ADDR,
7172 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7173 tw32(BUFMGR_MB_POOL_SIZE,
7174 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7177 if (tp->dev->mtu <= ETH_DATA_LEN) {
7178 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7179 tp->bufmgr_config.mbuf_read_dma_low_water);
7180 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7181 tp->bufmgr_config.mbuf_mac_rx_low_water);
7182 tw32(BUFMGR_MB_HIGH_WATER,
7183 tp->bufmgr_config.mbuf_high_water);
7185 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7186 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7187 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7188 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7189 tw32(BUFMGR_MB_HIGH_WATER,
7190 tp->bufmgr_config.mbuf_high_water_jumbo);
7192 tw32(BUFMGR_DMA_LOW_WATER,
7193 tp->bufmgr_config.dma_low_water);
7194 tw32(BUFMGR_DMA_HIGH_WATER,
7195 tp->bufmgr_config.dma_high_water);
7197 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7198 for (i = 0; i < 2000; i++) {
7199 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7204 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7209 /* Setup replenish threshold. */
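/* The threshold is 1/8 of the configured standard ring depth, clamped
 * to the chip's rx_std_max_post limit (and to half of the small
 * internal ring on 5906 parts).
 */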
7210 val = tp->rx_pending / 8;
7213 else if (val > tp->rx_std_max_post)
7214 val = tp->rx_std_max_post;
7215 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7216 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7217 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7219 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7220 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7223 tw32(RCVBDI_STD_THRESH, val);
7225 /* Initialize TG3_BDINFO's at:
7226 * RCVDBDI_STD_BD: standard eth size rx ring
7227 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7228 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7231 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7232 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7233 * ring attribute flags
7234 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7236 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7237 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7239 * The size of each ring is fixed in the firmware, but the location is configurable.
7242 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7243 ((u64) tpr->rx_std_mapping >> 32));
7244 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7245 ((u64) tpr->rx_std_mapping & 0xffffffff));
7246 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7247 NIC_SRAM_RX_BUFFER_DESC);
7249 /* Disable the mini ring */
7250 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7251 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7252 BDINFO_FLAGS_DISABLED);
7254 /* Program the jumbo buffer descriptor ring control
7255 * blocks on those devices that have them.
7257 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7258 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7259 /* Setup replenish threshold. */
7260 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7262 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7263 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7264 ((u64) tpr->rx_jmb_mapping >> 32));
7265 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7266 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7267 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7268 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7269 BDINFO_FLAGS_USE_EXT_RECV);
7270 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7271 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7273 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7274 BDINFO_FLAGS_DISABLED);
7277 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
7279 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7281 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7283 tpr->rx_std_ptr = tp->rx_pending;
7284 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7287 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7288 tp->rx_jumbo_pending : 0;
7289 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7292 tg3_rings_reset(tp);
7294 /* Initialize MAC address and backoff seed. */
7295 __tg3_set_mac_addr(tp, 0);
7297 /* MTU + ethernet header + FCS + optional VLAN tag */
7298 tw32(MAC_RX_MTU_SIZE,
7299 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7301 /* The slot time is changed by tg3_setup_phy if we
7302 * run at gigabit with half duplex.
7304 tw32(MAC_TX_LENGTHS,
7305 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7306 (6 << TX_LENGTHS_IPG_SHIFT) |
7307 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7309 /* Receive rules. */
7310 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7311 tw32(RCVLPC_CONFIG, 0x0181);
7313 /* Calculate RDMAC_MODE setting early, as we need it to determine
7314 * the RCVLPC_STATS_ENABLE mask.
7316 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7317 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7318 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7319 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7320 RDMAC_MODE_LNGREAD_ENAB);
7322 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7323 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7324 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7325 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7326 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7327 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7329 /* If statement applies to 5705 and 5750 PCI devices only */
7330 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7331 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7332 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7333 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7334 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7335 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7336 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7337 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7338 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7342 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7343 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7345 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7346 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7349 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7350 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7352 /* Receive/send statistics. */
7353 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7354 val = tr32(RCVLPC_STATS_ENABLE);
7355 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7356 tw32(RCVLPC_STATS_ENABLE, val);
7357 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7358 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7359 val = tr32(RCVLPC_STATS_ENABLE);
7360 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7361 tw32(RCVLPC_STATS_ENABLE, val);
7363 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7365 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7366 tw32(SNDDATAI_STATSENAB, 0xffffff);
7367 tw32(SNDDATAI_STATSCTRL,
7368 (SNDDATAI_SCTRL_ENABLE |
7369 SNDDATAI_SCTRL_FASTUPD));
7371 /* Setup host coalescing engine. */
7372 tw32(HOSTCC_MODE, 0);
7373 for (i = 0; i < 2000; i++) {
7374 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7379 __tg3_set_coalesce(tp, &tp->coal);
7381 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7382 /* Status/statistics block address. See tg3_timer,
7383 * the tg3_periodic_fetch_stats call there, and
7384 * tg3_get_stats to see how this works for 5705/5750 chips.
7386 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7387 ((u64) tp->stats_mapping >> 32));
7388 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7389 ((u64) tp->stats_mapping & 0xffffffff));
7390 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7392 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7394 /* Clear statistics and status block memory areas */
7395 for (i = NIC_SRAM_STATS_BLK;
7396 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7398 tg3_write_mem(tp, i, 0);
7403 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7405 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7406 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7407 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7408 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7410 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7411 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7412 /* reset to prevent losing 1st rx packet intermittently */
7413 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7417 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7418 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7421 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7422 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7423 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7424 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7425 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7426 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7427 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7430 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7431 * If TG3_FLG2_IS_NIC is zero, we should read the
7432 * register to preserve the GPIO settings for LOMs. The GPIOs,
7433 * whether used as inputs or outputs, are set by boot code after reset.
7436 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7439 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7440 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7441 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7444 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7445 GRC_LCLCTRL_GPIO_OUTPUT3;
7447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7448 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7450 tp->grc_local_ctrl &= ~gpio_mask;
7451 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7453 /* GPIO1 must be driven high for eeprom write protect */
7454 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7455 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7456 GRC_LCLCTRL_GPIO_OUTPUT1);
7458 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7461 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7462 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7466 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7467 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7468 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7469 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7470 WDMAC_MODE_LNGREAD_ENAB);
7472 /* If statement applies to 5705 and 5750 PCI devices only */
7473 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7474 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7476 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7477 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7478 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7480 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7481 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7482 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7483 val |= WDMAC_MODE_RX_ACCEL;
7487 /* Enable host coalescing bug fix */
7488 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7489 val |= WDMAC_MODE_STATUS_TAG_FIX;
7491 tw32_f(WDMAC_MODE, val);
7494 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7497 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7499 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7500 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7501 pcix_cmd |= PCI_X_CMD_READ_2K;
7502 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7503 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7504 pcix_cmd |= PCI_X_CMD_READ_2K;
7506 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7510 tw32_f(RDMAC_MODE, rdmac_mode);
7513 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7514 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7515 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7517 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7519 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7521 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7523 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7524 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7525 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7526 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7527 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7528 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7529 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7530 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7532 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7533 err = tg3_load_5701_a0_firmware_fix(tp);
7538 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7539 err = tg3_load_tso_firmware(tp);
7544 tp->tx_mode = TX_MODE_ENABLE;
7545 tw32_f(MAC_TX_MODE, tp->tx_mode);
7548 tp->rx_mode = RX_MODE_ENABLE;
7549 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7550 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7552 tw32_f(MAC_RX_MODE, tp->rx_mode);
7555 tw32(MAC_LED_CTRL, tp->led_ctrl);
7557 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7558 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7559 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7562 tw32_f(MAC_RX_MODE, tp->rx_mode);
7565 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7566 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7567 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7568 /* Set drive transmission level to 1.2V */
7569 /* only if the signal pre-emphasis bit is not set */
7570 val = tr32(MAC_SERDES_CFG);
7573 tw32(MAC_SERDES_CFG, val);
7575 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7576 tw32(MAC_SERDES_CFG, 0x616000);
7579 /* Prevent chip from dropping frames when flow control is enabled. */
7582 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7584 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7585 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7586 /* Use hardware link auto-negotiation */
7587 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7590 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7591 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7594 tmp = tr32(SERDES_RX_CTRL);
7595 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7596 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7597 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7598 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7601 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7602 if (tp->link_config.phy_is_low_power) {
7603 tp->link_config.phy_is_low_power = 0;
7604 tp->link_config.speed = tp->link_config.orig_speed;
7605 tp->link_config.duplex = tp->link_config.orig_duplex;
7606 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7609 err = tg3_setup_phy(tp, 0);
7613 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7614 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
7617 /* Clear CRC stats. */
7618 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7619 tg3_writephy(tp, MII_TG3_TEST1,
7620 tmp | MII_TG3_TEST1_CRC_EN);
7621 tg3_readphy(tp, 0x14, &tmp);
7626 __tg3_set_rx_mode(tp->dev);
7628 /* Initialize receive rules. */
7629 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7630 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7631 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7632 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7634 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7635 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7639 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7643 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7645 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7647 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7649 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7651 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7653 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7655 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7657 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7659 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7661 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7663 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7665 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7667 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7669 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7677 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7678 /* Write our heartbeat update interval to APE. */
7679 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7680 APE_HOST_HEARTBEAT_INT_DISABLE);
7682 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7687 /* Called at device open time to get the chip ready for
7688 * packet processing. Invoked with tp->lock held.
7690 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7692 tg3_switch_clocks(tp);
7694 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7696 return tg3_reset_hw(tp, reset_phy);
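/* Fold a 32-bit hardware counter into a 64-bit (high:low) software
 * counter. If the new low word is smaller than the value just added,
 * the addition wrapped, so carry into the high word.
 */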
7699 #define TG3_STAT_ADD32(PSTAT, REG) \
7700 do { u32 __val = tr32(REG); \
7701 (PSTAT)->low += __val; \
7702 if ((PSTAT)->low < __val) \
7703 (PSTAT)->high += 1; \
7706 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7708 struct tg3_hw_stats *sp = tp->hw_stats;
7710 if (!netif_carrier_ok(tp->dev))
7713 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7714 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7715 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7716 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7717 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7718 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7719 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7720 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7721 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7722 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7723 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7724 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7725 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7727 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7728 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7729 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7730 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7731 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7732 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7733 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7734 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7735 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7736 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7737 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7738 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7739 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7740 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7742 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7743 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7744 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7747 static void tg3_timer(unsigned long __opaque)
7749 struct tg3 *tp = (struct tg3 *) __opaque;
7754 spin_lock(&tp->lock);
7756 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7757 /* All of this garbage is because, when using non-tagged
7758 * IRQ status, the mailbox/status_block protocol the chip
7759 * uses with the cpu is race prone.
7761 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
7762 tw32(GRC_LOCAL_CTRL,
7763 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7765 tw32(HOSTCC_MODE, tp->coalesce_mode |
7766 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
7769 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7770 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7771 spin_unlock(&tp->lock);
7772 schedule_work(&tp->reset_task);
7777 /* This part only runs once per second. */
7778 if (!--tp->timer_counter) {
7779 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7780 tg3_periodic_fetch_stats(tp);
7782 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7786 mac_stat = tr32(MAC_STATUS);
7789 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7790 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7792 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7796 tg3_setup_phy(tp, 0);
7797 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7798 u32 mac_stat = tr32(MAC_STATUS);
7801 if (netif_carrier_ok(tp->dev) &&
7802 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7805 if (!netif_carrier_ok(tp->dev) &&
7806 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7807 MAC_STATUS_SIGNAL_DET))) {
7811 if (!tp->serdes_counter) {
7814 ~MAC_MODE_PORT_MODE_MASK));
7816 tw32_f(MAC_MODE, tp->mac_mode);
7819 tg3_setup_phy(tp, 0);
7821 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7822 tg3_serdes_parallel_detect(tp);
7824 tp->timer_counter = tp->timer_multiplier;
7827 /* Heartbeat is only sent once every 2 seconds.
7829 * The heartbeat is to tell the ASF firmware that the host
7830 * driver is still alive. In the event that the OS crashes,
7831 * ASF needs to reset the hardware to free up the FIFO space
7832 * that may be filled with rx packets destined for the host.
7833 * If the FIFO is full, ASF will no longer function properly.
7835 * Unintended resets have been reported on real time kernels
7836 * where the timer doesn't run on time. Netpoll will also have the same problem.
7839 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7840 * to check the ring condition when the heartbeat is expiring
7841 * before doing the reset. This will prevent most unintended resets.
7844 if (!--tp->asf_counter) {
7845 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7846 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7847 tg3_wait_for_event_ack(tp);
7849 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7850 FWCMD_NICDRV_ALIVE3);
7851 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7852 /* 5 seconds timeout */
7853 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7855 tg3_generate_fw_event(tp);
7857 tp->asf_counter = tp->asf_multiplier;
7860 spin_unlock(&tp->lock);
7863 tp->timer.expires = jiffies + tp->timer_offset;
7864 add_timer(&tp->timer);
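/* Pick the top-half handler for this vector: MSI and MSI-X get a
 * dedicated, non-shared handler (a one-shot variant when the chip
 * supports it); legacy INTx registers a shared handler, using the
 * tagged-status variant when tagged status is available.
 */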
7867 static int tg3_request_irq(struct tg3 *tp, int irq_num)
7870 unsigned long flags;
7872 struct tg3_napi *tnapi = &tp->napi[irq_num];
7874 if (tp->irq_cnt == 1)
7875 name = tp->dev->name;
7877 name = &tnapi->irq_lbl[0];
7878 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
7879 name[IFNAMSIZ-1] = 0;
7882 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
7884 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7886 flags = IRQF_SAMPLE_RANDOM;
7889 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7890 fn = tg3_interrupt_tagged;
7891 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7894 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
7897 static int tg3_test_interrupt(struct tg3 *tp)
7899 struct tg3_napi *tnapi = &tp->napi[0];
7900 struct net_device *dev = tp->dev;
7901 int err, i, intr_ok = 0;
7903 if (!netif_running(dev))
7906 tg3_disable_ints(tp);
7908 free_irq(tnapi->irq_vec, tnapi);
7910 err = request_irq(tnapi->irq_vec, tg3_test_isr,
7911 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
7915 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
7916 tg3_enable_ints(tp);
7918 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7921 for (i = 0; i < 5; i++) {
7922 u32 int_mbox, misc_host_ctrl;
7924 int_mbox = tr32_mailbox(tnapi->int_mbox);
7925 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7927 if ((int_mbox != 0) ||
7928 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7936 tg3_disable_ints(tp);
7938 free_irq(tnapi->irq_vec, tnapi);
7940 err = tg3_request_irq(tp, 0);
7951 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7952 * successfully restored
7954 static int tg3_test_msi(struct tg3 *tp)
7959 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7962 /* Turn off SERR reporting in case MSI terminates with Master Abort. */
7965 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7966 pci_write_config_word(tp->pdev, PCI_COMMAND,
7967 pci_cmd & ~PCI_COMMAND_SERR);
7969 err = tg3_test_interrupt(tp);
7971 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7976 /* other failures */
7980 /* MSI test failed, go back to INTx mode */
7981 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7982 "switching to INTx mode. Please report this failure to "
7983 "the PCI maintainer and include system chipset information.\n",
7986 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
7988 pci_disable_msi(tp->pdev);
7990 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7992 err = tg3_request_irq(tp, 0);
7996 /* Need to reset the chip because the MSI cycle may have terminated
7997 * with Master Abort.
7999 tg3_full_lock(tp, 1);
8001 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8002 err = tg3_init_hw(tp, 1);
8004 tg3_full_unlock(tp);
8007 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
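/* Per the comment below, the firmware image begins with three
 * big-endian words (version info, load address, total length
 * including BSS); the loadable payload follows at byte offset 12.
 */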
8012 static int tg3_request_firmware(struct tg3 *tp)
8014 const __be32 *fw_data;
8016 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8017 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
8018 tp->dev->name, tp->fw_needed);
8022 fw_data = (void *)tp->fw->data;
8024 /* Firmware blob starts with version numbers, followed by
8025 * start address and _full_ length including BSS sections
8026 * (which must be longer than the actual data, of course). */
8029 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8030 if (tp->fw_len < (tp->fw->size - 12)) {
8031 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
8032 tp->dev->name, tp->fw_len, tp->fw_needed);
8033 release_firmware(tp->fw);
8038 /* We no longer need firmware; we have it. */
8039 tp->fw_needed = NULL;
8043 static bool tg3_enable_msix(struct tg3 *tp)
8045 int i, rc, cpus = num_online_cpus();
8046 struct msix_entry msix_ent[tp->irq_max];
8049 /* Just fall back to the simpler MSI mode. */
8053 * We want as many rx rings enabled as there are cpus.
8054 * The first MSIX vector only deals with link interrupts, etc,
8055 * so we add one to the number of vectors we are requesting.
8057 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
8059 for (i = 0; i < tp->irq_max; i++) {
8060 msix_ent[i].entry = i;
8061 msix_ent[i].vector = 0;
8064 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8066 if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
8068 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8071 "%s: Requested %d MSI-X vectors, received %d\n",
8072 tp->dev->name, tp->irq_cnt, rc);
8076 for (i = 0; i < tp->irq_max; i++)
8077 tp->napi[i].irq_vec = msix_ent[i].vector;
8079 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8084 static void tg3_ints_init(struct tg3 *tp)
8086 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8087 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8088 /* All MSI supporting chips should support tagged
8089 * status. Assert that this is the case.
8091 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8092 "Not using MSI.\n", tp->dev->name);
8096 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8097 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8098 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8099 pci_enable_msi(tp->pdev) == 0)
8100 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8102 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8103 u32 msi_mode = tr32(MSGINT_MODE);
8104 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8107 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8109 tp->napi[0].irq_vec = tp->pdev->irq;
8110 tp->dev->real_num_tx_queues = 1;
8114 static void tg3_ints_fini(struct tg3 *tp)
8116 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8117 pci_disable_msix(tp->pdev);
8118 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8119 pci_disable_msi(tp->pdev);
8120 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8123 static int tg3_open(struct net_device *dev)
8125 struct tg3 *tp = netdev_priv(dev);
8128 if (tp->fw_needed) {
8129 err = tg3_request_firmware(tp);
8130 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8134 printk(KERN_WARNING "%s: TSO capability disabled.\n",
8136 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8137 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8138 printk(KERN_NOTICE "%s: TSO capability restored.\n",
8140 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8144 netif_carrier_off(tp->dev);
8146 err = tg3_set_power_state(tp, PCI_D0);
8150 tg3_full_lock(tp, 0);
8152 tg3_disable_ints(tp);
8153 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8155 tg3_full_unlock(tp);
8158 * Setup interrupts first so we know how
8159 * many NAPI resources to allocate
8163 /* The placement of this call is tied
8164 * to the setup and use of Host TX descriptors.
8166 err = tg3_alloc_consistent(tp);
8170 tg3_napi_enable(tp);
8172 for (i = 0; i < tp->irq_cnt; i++) {
8173 struct tg3_napi *tnapi = &tp->napi[i];
8174 err = tg3_request_irq(tp, i);
8176 for (i--; i >= 0; i--)
8177 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
8185 tg3_full_lock(tp, 0);
8187 err = tg3_init_hw(tp, 1);
8189 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
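/* With tagged status the driver timer only needs to fire once a
 * second; otherwise it polls ten times a second. timer_counter
 * divides that back down to once-per-second work and asf_counter to
 * the two-second ASF heartbeat.
 */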
8192 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8193 tp->timer_offset = HZ;
8195 tp->timer_offset = HZ / 10;
8197 BUG_ON(tp->timer_offset > HZ);
8198 tp->timer_counter = tp->timer_multiplier =
8199 (HZ / tp->timer_offset);
8200 tp->asf_counter = tp->asf_multiplier =
8201 ((HZ / tp->timer_offset) * 2);
8203 init_timer(&tp->timer);
8204 tp->timer.expires = jiffies + tp->timer_offset;
8205 tp->timer.data = (unsigned long) tp;
8206 tp->timer.function = tg3_timer;
8209 tg3_full_unlock(tp);
8214 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8215 err = tg3_test_msi(tp);
8218 tg3_full_lock(tp, 0);
8219 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8221 tg3_full_unlock(tp);
8226 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8227 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8228 u32 val = tr32(PCIE_TRANSACTION_CFG);
8230 tw32(PCIE_TRANSACTION_CFG,
8231 val | PCIE_TRANS_CFG_1SHOT_MSI);
8238 tg3_full_lock(tp, 0);
8240 add_timer(&tp->timer);
8241 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8242 tg3_enable_ints(tp);
8244 tg3_full_unlock(tp);
8246 netif_tx_start_all_queues(dev);
8251 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8252 struct tg3_napi *tnapi = &tp->napi[i];
8253 free_irq(tnapi->irq_vec, tnapi);
8257 tg3_napi_disable(tp);
8258 tg3_free_consistent(tp);
8266 /*static*/ void tg3_dump_state(struct tg3 *tp)
8268 u32 val32, val32_2, val32_3, val32_4, val32_5;
8271 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
8273 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8274 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8275 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8279 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8280 tr32(MAC_MODE), tr32(MAC_STATUS));
8281 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8282 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8283 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8284 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8285 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8286 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8288 /* Send data initiator control block */
8289 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8290 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8291 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8292 tr32(SNDDATAI_STATSCTRL));
8294 /* Send data completion control block */
8295 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8297 /* Send BD ring selector block */
8298 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8299 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8301 /* Send BD initiator control block */
8302 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8303 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8305 /* Send BD completion control block */
8306 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8308 /* Receive list placement control block */
8309 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8310 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8311 printk(" RCVLPC_STATSCTRL[%08x]\n",
8312 tr32(RCVLPC_STATSCTRL));
8314 /* Receive data and receive BD initiator control block */
8315 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8316 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8318 /* Receive data completion control block */
8319 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8322 /* Receive BD initiator control block */
8323 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8324 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8326 /* Receive BD completion control block */
8327 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8328 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8330 /* Receive list selector control block */
8331 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8332 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8334 /* Mbuf cluster free block */
8335 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8336 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8338 /* Host coalescing control block */
8339 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8340 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8341 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8342 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8343 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8344 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8345 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8346 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8347 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8348 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8349 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8350 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8352 /* Memory arbiter control block */
8353 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8354 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8356 /* Buffer manager control block */
8357 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8358 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8359 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8360 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8361 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8362 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8363 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8364 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8366 /* Read DMA control block */
8367 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8368 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8370 /* Write DMA control block */
8371 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8372 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8374 /* DMA completion block */
8375 printk("DEBUG: DMAC_MODE[%08x]\n",
8379 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8380 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8381 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8382 tr32(GRC_LOCAL_CTRL));
8385 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8386 tr32(RCVDBDI_JUMBO_BD + 0x0),
8387 tr32(RCVDBDI_JUMBO_BD + 0x4),
8388 tr32(RCVDBDI_JUMBO_BD + 0x8),
8389 tr32(RCVDBDI_JUMBO_BD + 0xc));
8390 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8391 tr32(RCVDBDI_STD_BD + 0x0),
8392 tr32(RCVDBDI_STD_BD + 0x4),
8393 tr32(RCVDBDI_STD_BD + 0x8),
8394 tr32(RCVDBDI_STD_BD + 0xc));
8395 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8396 tr32(RCVDBDI_MINI_BD + 0x0),
8397 tr32(RCVDBDI_MINI_BD + 0x4),
8398 tr32(RCVDBDI_MINI_BD + 0x8),
8399 tr32(RCVDBDI_MINI_BD + 0xc));
8401 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8402 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8403 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8404 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8405 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8406 val32, val32_2, val32_3, val32_4);
8408 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8409 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8410 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8411 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8412 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8413 val32, val32_2, val32_3, val32_4);
8415 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8416 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8417 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8418 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8419 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8420 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8421 val32, val32_2, val32_3, val32_4, val32_5);
8423 /* SW status block */
8425 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8428 sblk->rx_jumbo_consumer,
8430 sblk->rx_mini_consumer,
8431 sblk->idx[0].rx_producer,
8432 sblk->idx[0].tx_consumer);
8434 /* SW statistics block */
8435 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8436 ((u32 *)tp->hw_stats)[0],
8437 ((u32 *)tp->hw_stats)[1],
8438 ((u32 *)tp->hw_stats)[2],
8439 ((u32 *)tp->hw_stats)[3]);
8442 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8443 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8444 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8445 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8446 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8448 /* NIC side send descriptors. */
8449 for (i = 0; i < 6; i++) {
8452 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8453 + (i * sizeof(struct tg3_tx_buffer_desc));
8454 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8456 readl(txd + 0x0), readl(txd + 0x4),
8457 readl(txd + 0x8), readl(txd + 0xc));
8460 /* NIC side RX descriptors. */
8461 for (i = 0; i < 6; i++) {
8464 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8465 + (i * sizeof(struct tg3_rx_buffer_desc));
8466 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8468 readl(rxd + 0x0), readl(rxd + 0x4),
8469 readl(rxd + 0x8), readl(rxd + 0xc));
8470 rxd += (4 * sizeof(u32));
8471 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8473 readl(rxd + 0x0), readl(rxd + 0x4),
8474 readl(rxd + 0x8), readl(rxd + 0xc));
8477 for (i = 0; i < 6; i++) {
8480 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8481 + (i * sizeof(struct tg3_rx_buffer_desc));
8482 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8484 readl(rxd + 0x0), readl(rxd + 0x4),
8485 readl(rxd + 0x8), readl(rxd + 0xc));
8486 rxd += (4 * sizeof(u32));
8487 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8489 readl(rxd + 0x0), readl(rxd + 0x4),
8490 readl(rxd + 0x8), readl(rxd + 0xc));
8495 static struct net_device_stats *tg3_get_stats(struct net_device *);
8496 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8498 static int tg3_close(struct net_device *dev)
8501 struct tg3 *tp = netdev_priv(dev);
8503 tg3_napi_disable(tp);
8504 cancel_work_sync(&tp->reset_task);
8506 netif_tx_stop_all_queues(dev);
8508 del_timer_sync(&tp->timer);
8510 tg3_full_lock(tp, 1);
8515 tg3_disable_ints(tp);
8517 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8519 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8521 tg3_full_unlock(tp);
8523 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8524 struct tg3_napi *tnapi = &tp->napi[i];
8525 free_irq(tnapi->irq_vec, tnapi);
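/* Latch the running totals into the *_prev copies so the reported
 * counters stay monotonic across a close/open cycle; the DMA'd
 * hardware stats block is freed just below.
 */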
8530 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8531 sizeof(tp->net_stats_prev));
8532 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8533 sizeof(tp->estats_prev));
8535 tg3_free_consistent(tp);
8537 tg3_set_power_state(tp, PCI_D3hot);
8539 netif_carrier_off(tp->dev);
8544 static inline unsigned long get_stat64(tg3_stat64_t *val)
8548 #if (BITS_PER_LONG == 32)
8551 ret = ((u64)val->high << 32) | ((u64)val->low);
8556 static inline u64 get_estat64(tg3_stat64_t *val)
8558 return ((u64)val->high << 32) | ((u64)val->low);
8561 static unsigned long calc_crc_errors(struct tg3 *tp)
8563 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8565 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8566 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8567 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8570 spin_lock_bh(&tp->lock);
8571 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8572 tg3_writephy(tp, MII_TG3_TEST1,
8573 val | MII_TG3_TEST1_CRC_EN);
8574 tg3_readphy(tp, 0x14, &val);
8577 spin_unlock_bh(&tp->lock);
8579 tp->phy_crc_errors += val;
8581 return tp->phy_crc_errors;
8584 return get_stat64(&hw_stats->rx_fcs_errors);
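/* Each reported ethtool counter is the total latched at the last
 * close (estats_prev) plus the value currently accumulated in the
 * hardware statistics block.
 */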
8587 #define ESTAT_ADD(member) \
8588 estats->member = old_estats->member + \
8589 get_estat64(&hw_stats->member)
8591 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8593 struct tg3_ethtool_stats *estats = &tp->estats;
8594 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8595 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8600 ESTAT_ADD(rx_octets);
8601 ESTAT_ADD(rx_fragments);
8602 ESTAT_ADD(rx_ucast_packets);
8603 ESTAT_ADD(rx_mcast_packets);
8604 ESTAT_ADD(rx_bcast_packets);
8605 ESTAT_ADD(rx_fcs_errors);
8606 ESTAT_ADD(rx_align_errors);
8607 ESTAT_ADD(rx_xon_pause_rcvd);
8608 ESTAT_ADD(rx_xoff_pause_rcvd);
8609 ESTAT_ADD(rx_mac_ctrl_rcvd);
8610 ESTAT_ADD(rx_xoff_entered);
8611 ESTAT_ADD(rx_frame_too_long_errors);
8612 ESTAT_ADD(rx_jabbers);
8613 ESTAT_ADD(rx_undersize_packets);
8614 ESTAT_ADD(rx_in_length_errors);
8615 ESTAT_ADD(rx_out_length_errors);
8616 ESTAT_ADD(rx_64_or_less_octet_packets);
8617 ESTAT_ADD(rx_65_to_127_octet_packets);
8618 ESTAT_ADD(rx_128_to_255_octet_packets);
8619 ESTAT_ADD(rx_256_to_511_octet_packets);
8620 ESTAT_ADD(rx_512_to_1023_octet_packets);
8621 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8622 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8623 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8624 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8625 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8627 ESTAT_ADD(tx_octets);
8628 ESTAT_ADD(tx_collisions);
8629 ESTAT_ADD(tx_xon_sent);
8630 ESTAT_ADD(tx_xoff_sent);
8631 ESTAT_ADD(tx_flow_control);
8632 ESTAT_ADD(tx_mac_errors);
8633 ESTAT_ADD(tx_single_collisions);
8634 ESTAT_ADD(tx_mult_collisions);
8635 ESTAT_ADD(tx_deferred);
8636 ESTAT_ADD(tx_excessive_collisions);
8637 ESTAT_ADD(tx_late_collisions);
8638 ESTAT_ADD(tx_collide_2times);
8639 ESTAT_ADD(tx_collide_3times);
8640 ESTAT_ADD(tx_collide_4times);
8641 ESTAT_ADD(tx_collide_5times);
8642 ESTAT_ADD(tx_collide_6times);
8643 ESTAT_ADD(tx_collide_7times);
8644 ESTAT_ADD(tx_collide_8times);
8645 ESTAT_ADD(tx_collide_9times);
8646 ESTAT_ADD(tx_collide_10times);
8647 ESTAT_ADD(tx_collide_11times);
8648 ESTAT_ADD(tx_collide_12times);
8649 ESTAT_ADD(tx_collide_13times);
8650 ESTAT_ADD(tx_collide_14times);
8651 ESTAT_ADD(tx_collide_15times);
8652 ESTAT_ADD(tx_ucast_packets);
8653 ESTAT_ADD(tx_mcast_packets);
8654 ESTAT_ADD(tx_bcast_packets);
8655 ESTAT_ADD(tx_carrier_sense_errors);
8656 ESTAT_ADD(tx_discards);
8657 ESTAT_ADD(tx_errors);
8659 ESTAT_ADD(dma_writeq_full);
8660 ESTAT_ADD(dma_write_prioq_full);
8661 ESTAT_ADD(rxbds_empty);
8662 ESTAT_ADD(rx_discards);
8663 ESTAT_ADD(rx_errors);
8664 ESTAT_ADD(rx_threshold_hit);
8666 ESTAT_ADD(dma_readq_full);
8667 ESTAT_ADD(dma_read_prioq_full);
8668 ESTAT_ADD(tx_comp_queue_full);
8670 ESTAT_ADD(ring_set_send_prod_index);
8671 ESTAT_ADD(ring_status_update);
8672 ESTAT_ADD(nic_irqs);
8673 ESTAT_ADD(nic_avoided_irqs);
8674 ESTAT_ADD(nic_tx_threshold_hit);
8679 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8681 struct tg3 *tp = netdev_priv(dev);
8682 struct net_device_stats *stats = &tp->net_stats;
8683 struct net_device_stats *old_stats = &tp->net_stats_prev;
8684 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8689 stats->rx_packets = old_stats->rx_packets +
8690 get_stat64(&hw_stats->rx_ucast_packets) +
8691 get_stat64(&hw_stats->rx_mcast_packets) +
8692 get_stat64(&hw_stats->rx_bcast_packets);
8694 stats->tx_packets = old_stats->tx_packets +
8695 get_stat64(&hw_stats->tx_ucast_packets) +
8696 get_stat64(&hw_stats->tx_mcast_packets) +
8697 get_stat64(&hw_stats->tx_bcast_packets);
8699 stats->rx_bytes = old_stats->rx_bytes +
8700 get_stat64(&hw_stats->rx_octets);
8701 stats->tx_bytes = old_stats->tx_bytes +
8702 get_stat64(&hw_stats->tx_octets);
8704 stats->rx_errors = old_stats->rx_errors +
8705 get_stat64(&hw_stats->rx_errors);
8706 stats->tx_errors = old_stats->tx_errors +
8707 get_stat64(&hw_stats->tx_errors) +
8708 get_stat64(&hw_stats->tx_mac_errors) +
8709 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8710 get_stat64(&hw_stats->tx_discards);
8712 stats->multicast = old_stats->multicast +
8713 get_stat64(&hw_stats->rx_mcast_packets);
8714 stats->collisions = old_stats->collisions +
8715 get_stat64(&hw_stats->tx_collisions);
8717 stats->rx_length_errors = old_stats->rx_length_errors +
8718 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8719 get_stat64(&hw_stats->rx_undersize_packets);
8721 stats->rx_over_errors = old_stats->rx_over_errors +
8722 get_stat64(&hw_stats->rxbds_empty);
8723 stats->rx_frame_errors = old_stats->rx_frame_errors +
8724 get_stat64(&hw_stats->rx_align_errors);
8725 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8726 get_stat64(&hw_stats->tx_discards);
8727 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8728 get_stat64(&hw_stats->tx_carrier_sense_errors);
8730 stats->rx_crc_errors = old_stats->rx_crc_errors +
8731 calc_crc_errors(tp);
8733 stats->rx_missed_errors = old_stats->rx_missed_errors +
8734 get_stat64(&hw_stats->rx_discards);
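/* Standard bit-reflected Ethernet CRC-32 over the buffer, used to
 * hash multicast addresses into the MAC hash filter registers.
 */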
8739 static inline u32 calc_crc(unsigned char *buf, int len)
8747 for (j = 0; j < len; j++) {
8750 for (k = 0; k < 8; k++) {
8764 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8766 /* accept or reject all multicast frames */
8767 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8768 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8769 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8770 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8773 static void __tg3_set_rx_mode(struct net_device *dev)
8775 struct tg3 *tp = netdev_priv(dev);
8778 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8779 RX_MODE_KEEP_VLAN_TAG);
8781 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8784 #if TG3_VLAN_TAG_USED
8786 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8787 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8789 /* By definition, VLAN is always disabled in this case. */
8792 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8793 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8796 if (dev->flags & IFF_PROMISC) {
8797 /* Promiscuous mode. */
8798 rx_mode |= RX_MODE_PROMISC;
8799 } else if (dev->flags & IFF_ALLMULTI) {
8800 /* Accept all multicast. */
8801 tg3_set_multi(tp, 1);
8802 } else if (dev->mc_count < 1) {
8803 /* Reject all multicast. */
8804 tg3_set_multi(tp, 0);
8806 /* Accept one or more multicast(s). */
8807 struct dev_mc_list *mclist;
8809 u32 mc_filter[4] = { 0, };
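/* Hash each address with the Ethernet CRC and set the matching bit in
 * the 128-bit hash filter: bits 5-6 of the hash index select one of
 * the four registers, bits 0-4 the bit within it.
 */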
8814 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8815 i++, mclist = mclist->next) {
8817 crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8819 regidx = (bit & 0x60) >> 5;
8821 mc_filter[regidx] |= (1 << bit);
8824 tw32(MAC_HASH_REG_0, mc_filter[0]);
8825 tw32(MAC_HASH_REG_1, mc_filter[1]);
8826 tw32(MAC_HASH_REG_2, mc_filter[2]);
8827 tw32(MAC_HASH_REG_3, mc_filter[3]);
8830 if (rx_mode != tp->rx_mode) {
8831 tp->rx_mode = rx_mode;
8832 tw32_f(MAC_RX_MODE, rx_mode);
8837 static void tg3_set_rx_mode(struct net_device *dev)
8839 struct tg3 *tp = netdev_priv(dev);
8841 if (!netif_running(dev))
8844 tg3_full_lock(tp, 0);
8845 __tg3_set_rx_mode(dev);
8846 tg3_full_unlock(tp);
8849 #define TG3_REGDUMP_LEN (32 * 1024)
8851 static int tg3_get_regs_len(struct net_device *dev)
8853 return TG3_REGDUMP_LEN;
8856 static void tg3_get_regs(struct net_device *dev,
8857 struct ethtool_regs *regs, void *_p)
8860 struct tg3 *tp = netdev_priv(dev);
8866 memset(p, 0, TG3_REGDUMP_LEN);
8868 if (tp->link_config.phy_is_low_power)
8871 tg3_full_lock(tp, 0);
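/* The dump buffer mirrors the register address map: each helper below
 * positions the output pointer at the register's offset within the
 * 32K image before copying, so untouched holes stay zero from the
 * memset above.
 */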
8873 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8874 #define GET_REG32_LOOP(base,len) \
8875 do { p = (u32 *)(orig_p + (base)); \
8876 for (i = 0; i < len; i += 4) \
8877 __GET_REG32((base) + i); \
8879 #define GET_REG32_1(reg) \
8880 do { p = (u32 *)(orig_p + (reg)); \
8881 __GET_REG32((reg)); \
8884 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8885 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8886 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8887 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8888 GET_REG32_1(SNDDATAC_MODE);
8889 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8890 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8891 GET_REG32_1(SNDBDC_MODE);
8892 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8893 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8894 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8895 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8896 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8897 GET_REG32_1(RCVDCC_MODE);
8898 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8899 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8900 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8901 GET_REG32_1(MBFREE_MODE);
8902 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8903 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8904 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8905 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8906 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8907 GET_REG32_1(RX_CPU_MODE);
8908 GET_REG32_1(RX_CPU_STATE);
8909 GET_REG32_1(RX_CPU_PGMCTR);
8910 GET_REG32_1(RX_CPU_HWBKPT);
8911 GET_REG32_1(TX_CPU_MODE);
8912 GET_REG32_1(TX_CPU_STATE);
8913 GET_REG32_1(TX_CPU_PGMCTR);
8914 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8915 GET_REG32_LOOP(FTQ_RESET, 0x120);
8916 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8917 GET_REG32_1(DMAC_MODE);
8918 GET_REG32_LOOP(GRC_MODE, 0x4c);
8919 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8920 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8923 #undef GET_REG32_LOOP
8926 tg3_full_unlock(tp);
8929 static int tg3_get_eeprom_len(struct net_device *dev)
8931 struct tg3 *tp = netdev_priv(dev);
8933 return tp->nvram_size;
8936 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8938 struct tg3 *tp = netdev_priv(dev);
8941 u32 i, offset, len, b_offset, b_count;
8944 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
8947 if (tp->link_config.phy_is_low_power)
8950 offset = eeprom->offset;
8954 eeprom->magic = TG3_EEPROM_MAGIC;
8957 /* adjustments to start on required 4 byte boundary */
8958 b_offset = offset & 3;
8959 b_count = 4 - b_offset;
8960 if (b_count > len) {
8961 /* i.e. offset=1 len=2 */
8964 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
8967 memcpy(data, ((char*)&val) + b_offset, b_count);
8970 eeprom->len += b_count;
8973 /* read bytes up to the last 4 byte boundary */
8974 pd = &data[eeprom->len];
8975 for (i = 0; i < (len - (len & 3)); i += 4) {
8976 ret = tg3_nvram_read_be32(tp, offset + i, &val);
8981 memcpy(pd + i, &val, 4);
8986 /* read last bytes not ending on 4 byte boundary */
8987 pd = &data[eeprom->len];
8989 b_offset = offset + len - b_count;
8990 ret = tg3_nvram_read_be32(tp, b_offset, &val);
8993 memcpy(pd, &val, b_count);
8994 eeprom->len += b_count;
8999 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9001 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9003 struct tg3 *tp = netdev_priv(dev);
9005 u32 offset, len, b_offset, odd_len;
9009 if (tp->link_config.phy_is_low_power)
9012 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9013 eeprom->magic != TG3_EEPROM_MAGIC)
9016 offset = eeprom->offset;
9019 if ((b_offset = (offset & 3))) {
9020 /* adjustments to start on required 4 byte boundary */
9021 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9032 /* adjustments to end on required 4 byte boundary */
9034 len = (len + 3) & ~3;
9035 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9041 if (b_offset || odd_len) {
9042 buf = kmalloc(len, GFP_KERNEL);
9046 memcpy(buf, &start, 4);
9048 memcpy(buf+len-4, &end, 4);
9049 memcpy(buf + b_offset, data, eeprom->len);
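/* buf now holds the original bounding words with the caller's bytes
 * spliced in, so the aligned block write below cannot clobber data
 * outside the requested range.
 */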
9052 ret = tg3_nvram_write_block(tp, offset, len, buf);
9060 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9062 struct tg3 *tp = netdev_priv(dev);
9064 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9065 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9067 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9070 cmd->supported = (SUPPORTED_Autoneg);
9072 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9073 cmd->supported |= (SUPPORTED_1000baseT_Half |
9074 SUPPORTED_1000baseT_Full);
9076 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9077 cmd->supported |= (SUPPORTED_100baseT_Half |
9078 SUPPORTED_100baseT_Full |
9079 SUPPORTED_10baseT_Half |
9080 SUPPORTED_10baseT_Full |
9082 cmd->port = PORT_TP;
9084 cmd->supported |= SUPPORTED_FIBRE;
9085 cmd->port = PORT_FIBRE;
9088 cmd->advertising = tp->link_config.advertising;
9089 if (netif_running(dev)) {
9090 cmd->speed = tp->link_config.active_speed;
9091 cmd->duplex = tp->link_config.active_duplex;
9093 cmd->phy_address = PHY_ADDR;
9094 cmd->transceiver = XCVR_INTERNAL;
9095 cmd->autoneg = tp->link_config.autoneg;
9101 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9103 struct tg3 *tp = netdev_priv(dev);
9105 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9106 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9108 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9111 if (cmd->autoneg != AUTONEG_ENABLE &&
9112 cmd->autoneg != AUTONEG_DISABLE)
9115 if (cmd->autoneg == AUTONEG_DISABLE &&
9116 cmd->duplex != DUPLEX_FULL &&
9117 cmd->duplex != DUPLEX_HALF)
9120 if (cmd->autoneg == AUTONEG_ENABLE) {
9121 u32 mask = ADVERTISED_Autoneg |
9123 ADVERTISED_Asym_Pause;
9125 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9126 mask |= ADVERTISED_1000baseT_Half |
9127 ADVERTISED_1000baseT_Full;
9129 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9130 mask |= ADVERTISED_100baseT_Half |
9131 ADVERTISED_100baseT_Full |
9132 ADVERTISED_10baseT_Half |
9133 ADVERTISED_10baseT_Full |
9136 mask |= ADVERTISED_FIBRE;
9138 if (cmd->advertising & ~mask)
9141 mask &= (ADVERTISED_1000baseT_Half |
9142 ADVERTISED_1000baseT_Full |
9143 ADVERTISED_100baseT_Half |
9144 ADVERTISED_100baseT_Full |
9145 ADVERTISED_10baseT_Half |
9146 ADVERTISED_10baseT_Full);
9148 cmd->advertising &= mask;
9150 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9151 if (cmd->speed != SPEED_1000)
9154 if (cmd->duplex != DUPLEX_FULL)
9157 if (cmd->speed != SPEED_100 &&
9158 cmd->speed != SPEED_10)
9163 tg3_full_lock(tp, 0);
9165 tp->link_config.autoneg = cmd->autoneg;
9166 if (cmd->autoneg == AUTONEG_ENABLE) {
9167 tp->link_config.advertising = (cmd->advertising |
9168 ADVERTISED_Autoneg);
9169 tp->link_config.speed = SPEED_INVALID;
9170 tp->link_config.duplex = DUPLEX_INVALID;
9172 tp->link_config.advertising = 0;
9173 tp->link_config.speed = cmd->speed;
9174 tp->link_config.duplex = cmd->duplex;
9177 tp->link_config.orig_speed = tp->link_config.speed;
9178 tp->link_config.orig_duplex = tp->link_config.duplex;
9179 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9181 if (netif_running(dev))
9182 tg3_setup_phy(tp, 1);
9184 tg3_full_unlock(tp);
9189 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9191 struct tg3 *tp = netdev_priv(dev);
9193 strcpy(info->driver, DRV_MODULE_NAME);
9194 strcpy(info->version, DRV_MODULE_VERSION);
9195 strcpy(info->fw_version, tp->fw_ver);
9196 strcpy(info->bus_info, pci_name(tp->pdev));
9199 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9201 struct tg3 *tp = netdev_priv(dev);
9203 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9204 device_can_wakeup(&tp->pdev->dev))
9205 wol->supported = WAKE_MAGIC;
9209 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9210 device_can_wakeup(&tp->pdev->dev))
9211 wol->wolopts = WAKE_MAGIC;
9212 memset(&wol->sopass, 0, sizeof(wol->sopass));
9215 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9217 struct tg3 *tp = netdev_priv(dev);
9218 struct device *dp = &tp->pdev->dev;
9220 if (wol->wolopts & ~WAKE_MAGIC)
9222 if ((wol->wolopts & WAKE_MAGIC) &&
9223 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9226 spin_lock_bh(&tp->lock);
9227 if (wol->wolopts & WAKE_MAGIC) {
9228 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9229 device_set_wakeup_enable(dp, true);
9231 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9232 device_set_wakeup_enable(dp, false);
9234 spin_unlock_bh(&tp->lock);
9239 static u32 tg3_get_msglevel(struct net_device *dev)
9241 struct tg3 *tp = netdev_priv(dev);
9242 return tp->msg_enable;
9245 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9247 struct tg3 *tp = netdev_priv(dev);
9248 tp->msg_enable = value;
9251 static int tg3_set_tso(struct net_device *dev, u32 value)
9253 struct tg3 *tp = netdev_priv(dev);
9255 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9260 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9261 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
9263 dev->features |= NETIF_F_TSO6;
9264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9265 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9266 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9269 dev->features |= NETIF_F_TSO_ECN;
9271 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9273 return ethtool_op_set_tso(dev, value);
9276 static int tg3_nway_reset(struct net_device *dev)
9278 struct tg3 *tp = netdev_priv(dev);
9281 if (!netif_running(dev))
9284 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9287 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9288 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9290 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
9294 spin_lock_bh(&tp->lock);
9296 tg3_readphy(tp, MII_BMCR, &bmcr);
9297 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9298 ((bmcr & BMCR_ANENABLE) ||
9299 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9300 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9304 spin_unlock_bh(&tp->lock);
9310 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9312 struct tg3 *tp = netdev_priv(dev);
9314 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9315 ering->rx_mini_max_pending = 0;
9316 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9317 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9319 ering->rx_jumbo_max_pending = 0;
9321 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9323 ering->rx_pending = tp->rx_pending;
9324 ering->rx_mini_pending = 0;
9325 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9326 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9328 ering->rx_jumbo_pending = 0;
9330 ering->tx_pending = tp->napi[0].tx_pending;
9333 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9335 struct tg3 *tp = netdev_priv(dev);
9336 int i, irq_sync = 0, err = 0;
9338 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9339 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9340 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9341 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9342 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9343 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9346 if (netif_running(dev)) {
9352 tg3_full_lock(tp, irq_sync);
9354 tp->rx_pending = ering->rx_pending;
9356 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9357 tp->rx_pending > 63)
9358 tp->rx_pending = 63;
9359 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9361 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9362 tp->napi[i].tx_pending = ering->tx_pending;
9364 if (netif_running(dev)) {
9365 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9366 err = tg3_restart_hw(tp, 1);
9368 tg3_netif_start(tp);
9371 tg3_full_unlock(tp);
9373 if (irq_sync && !err)
9379 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9381 struct tg3 *tp = netdev_priv(dev);
9383 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9385 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9386 epause->rx_pause = 1;
9388 epause->rx_pause = 0;
9390 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9391 epause->tx_pause = 1;
9393 epause->tx_pause = 0;
9396 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9398 struct tg3 *tp = netdev_priv(dev);
9401 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9402 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9405 if (epause->autoneg) {
9407 struct phy_device *phydev;
9409 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9411 if (epause->rx_pause) {
9412 if (epause->tx_pause)
9413 newadv = ADVERTISED_Pause;
9415 newadv = ADVERTISED_Pause |
9416 ADVERTISED_Asym_Pause;
9417 } else if (epause->tx_pause) {
9418 newadv = ADVERTISED_Asym_Pause;
9422 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9423 u32 oldadv = phydev->advertising &
9425 ADVERTISED_Asym_Pause);
9426 if (oldadv != newadv) {
9427 phydev->advertising &=
9428 ~(ADVERTISED_Pause |
9429 ADVERTISED_Asym_Pause);
9430 phydev->advertising |= newadv;
9431 err = phy_start_aneg(phydev);
9434 tp->link_config.advertising &=
9435 ~(ADVERTISED_Pause |
9436 ADVERTISED_Asym_Pause);
9437 tp->link_config.advertising |= newadv;
9440 if (epause->rx_pause)
9441 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9443 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9445 if (epause->tx_pause)
9446 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9448 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9450 if (netif_running(dev))
9451 tg3_setup_flow_control(tp, 0, 0);
9456 if (netif_running(dev)) {
9461 tg3_full_lock(tp, irq_sync);
9463 if (epause->autoneg)
9464 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9466 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9467 if (epause->rx_pause)
9468 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9470 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9471 if (epause->tx_pause)
9472 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9474 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9476 if (netif_running(dev)) {
9477 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9478 err = tg3_restart_hw(tp, 1);
9480 tg3_netif_start(tp);
9483 tg3_full_unlock(tp);
9489 static u32 tg3_get_rx_csum(struct net_device *dev)
9491 struct tg3 *tp = netdev_priv(dev);
9492 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9495 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9497 struct tg3 *tp = netdev_priv(dev);
9499 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9505 spin_lock_bh(&tp->lock);
9507 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9509 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9510 spin_unlock_bh(&tp->lock);
9515 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9517 struct tg3 *tp = netdev_priv(dev);
9519 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9525 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9526 ethtool_op_set_tx_ipv6_csum(dev, data);
9528 ethtool_op_set_tx_csum(dev, data);
9533 static int tg3_get_sset_count (struct net_device *dev, int sset)
9537 return TG3_NUM_TEST;
9539 return TG3_NUM_STATS;
9545 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9547 switch (stringset) {
9549 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9552 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9555 WARN_ON(1); /* we need a WARN() */
9560 static int tg3_phys_id(struct net_device *dev, u32 data)
9562 struct tg3 *tp = netdev_priv(dev);
9565 if (!netif_running(tp->dev))
9569 data = UINT_MAX / 2;
9571 for (i = 0; i < (data * 2); i++) {
9573 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9574 LED_CTRL_1000MBPS_ON |
9575 LED_CTRL_100MBPS_ON |
9576 LED_CTRL_10MBPS_ON |
9577 LED_CTRL_TRAFFIC_OVERRIDE |
9578 LED_CTRL_TRAFFIC_BLINK |
9579 LED_CTRL_TRAFFIC_LED);
9582 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9583 LED_CTRL_TRAFFIC_OVERRIDE);
9585 if (msleep_interruptible(500))
9588 tw32(MAC_LED_CTRL, tp->led_ctrl);
9592 static void tg3_get_ethtool_stats (struct net_device *dev,
9593 struct ethtool_stats *estats, u64 *tmp_stats)
9595 struct tg3 *tp = netdev_priv(dev);
9596 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9599 #define NVRAM_TEST_SIZE 0x100
9600 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9601 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9602 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9603 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9604 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
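/* The NVRAM self test sizes the image from its magic/format word, reads
 * it into a scratch buffer and then validates it: selfboot firmware
 * images carry a simple 8-bit checksum, hardware selfboot images store
 * per-byte parity bits, and legacy images are covered by CRCs over the
 * bootstrap and manufacturing blocks.
 */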
9606 static int tg3_test_nvram(struct tg3 *tp)
9610 int i, j, k, err = 0, size;
9612 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9615 if (tg3_nvram_read(tp, 0, &magic) != 0)
9618 if (magic == TG3_EEPROM_MAGIC)
9619 size = NVRAM_TEST_SIZE;
9620 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9621 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9622 TG3_EEPROM_SB_FORMAT_1) {
9623 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9624 case TG3_EEPROM_SB_REVISION_0:
9625 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9627 case TG3_EEPROM_SB_REVISION_2:
9628 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9630 case TG3_EEPROM_SB_REVISION_3:
9631 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9638 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9639 size = NVRAM_SELFBOOT_HW_SIZE;
9643 buf = kmalloc(size, GFP_KERNEL);
9648 for (i = 0, j = 0; i < size; i += 4, j++) {
9649 err = tg3_nvram_read_be32(tp, i, &buf[j]);
9656 /* Selfboot format */
9657 magic = be32_to_cpu(buf[0]);
9658 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9659 TG3_EEPROM_MAGIC_FW) {
9660 u8 *buf8 = (u8 *) buf, csum8 = 0;
9662 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9663 TG3_EEPROM_SB_REVISION_2) {
9664 /* For rev 2, the csum doesn't include the MBA. */
9665 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9667 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9670 for (i = 0; i < size; i++)
9683 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9684 TG3_EEPROM_MAGIC_HW) {
9685 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9686 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9687 u8 *buf8 = (u8 *) buf;
9689 /* Separate the parity bits and the data bytes. */
9690 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9691 if ((i == 0) || (i == 8)) {
9695 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9696 parity[k++] = buf8[i] & msk;
9703 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9704 parity[k++] = buf8[i] & msk;
9707 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9708 parity[k++] = buf8[i] & msk;
9711 data[j++] = buf8[i];
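/* Check odd parity: a data byte with an even number of set bits must
 * have its stored parity bit set, and one with an odd count must not.
 */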
9715 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9716 u8 hw8 = hweight8(data[i]);
9718 if ((hw8 & 0x1) && parity[i])
9720 else if (!(hw8 & 0x1) && !parity[i])
9727 /* Bootstrap checksum at offset 0x10 */
9728 csum = calc_crc((unsigned char *) buf, 0x10);
9729 if (csum != be32_to_cpu(buf[0x10/4]))
9732 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9733 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9734 if (csum != be32_to_cpu(buf[0xfc/4]))
9744 #define TG3_SERDES_TIMEOUT_SEC 2
9745 #define TG3_COPPER_TIMEOUT_SEC 6
9747 static int tg3_test_link(struct tg3 *tp)
9751 if (!netif_running(tp->dev))
9754 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9755 max = TG3_SERDES_TIMEOUT_SEC;
9757 max = TG3_COPPER_TIMEOUT_SEC;
9759 for (i = 0; i < max; i++) {
9760 if (netif_carrier_ok(tp->dev))
9763 if (msleep_interruptible(1000))
9770 /* Only test the commonly used registers */
9771 static int tg3_test_registers(struct tg3 *tp)
9773 int i, is_5705, is_5750;
9774 u32 offset, read_mask, write_mask, val, save_val, read_val;
9778 #define TG3_FL_5705 0x1
9779 #define TG3_FL_NOT_5705 0x2
9780 #define TG3_FL_NOT_5788 0x4
9781 #define TG3_FL_NOT_5750 0x8
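/* Each reg_tbl entry is { offset, flags, read-only mask, read/write
 * mask }; the TG3_FL_* flags restrict the entry to the chip families
 * it applies to.
 */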
9785 /* MAC Control Registers */
9786 { MAC_MODE, TG3_FL_NOT_5705,
9787 0x00000000, 0x00ef6f8c },
9788 { MAC_MODE, TG3_FL_5705,
9789 0x00000000, 0x01ef6b8c },
9790 { MAC_STATUS, TG3_FL_NOT_5705,
9791 0x03800107, 0x00000000 },
9792 { MAC_STATUS, TG3_FL_5705,
9793 0x03800100, 0x00000000 },
9794 { MAC_ADDR_0_HIGH, 0x0000,
9795 0x00000000, 0x0000ffff },
9796 { MAC_ADDR_0_LOW, 0x0000,
9797 0x00000000, 0xffffffff },
9798 { MAC_RX_MTU_SIZE, 0x0000,
9799 0x00000000, 0x0000ffff },
9800 { MAC_TX_MODE, 0x0000,
9801 0x00000000, 0x00000070 },
9802 { MAC_TX_LENGTHS, 0x0000,
9803 0x00000000, 0x00003fff },
9804 { MAC_RX_MODE, TG3_FL_NOT_5705,
9805 0x00000000, 0x000007fc },
9806 { MAC_RX_MODE, TG3_FL_5705,
9807 0x00000000, 0x000007dc },
9808 { MAC_HASH_REG_0, 0x0000,
9809 0x00000000, 0xffffffff },
9810 { MAC_HASH_REG_1, 0x0000,
9811 0x00000000, 0xffffffff },
9812 { MAC_HASH_REG_2, 0x0000,
9813 0x00000000, 0xffffffff },
9814 { MAC_HASH_REG_3, 0x0000,
9815 0x00000000, 0xffffffff },
9817 /* Receive Data and Receive BD Initiator Control Registers. */
9818 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9819 0x00000000, 0xffffffff },
9820 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9821 0x00000000, 0xffffffff },
9822 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9823 0x00000000, 0x00000003 },
9824 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9825 0x00000000, 0xffffffff },
9826 { RCVDBDI_STD_BD+0, 0x0000,
9827 0x00000000, 0xffffffff },
9828 { RCVDBDI_STD_BD+4, 0x0000,
9829 0x00000000, 0xffffffff },
9830 { RCVDBDI_STD_BD+8, 0x0000,
9831 0x00000000, 0xffff0002 },
9832 { RCVDBDI_STD_BD+0xc, 0x0000,
9833 0x00000000, 0xffffffff },
9835 /* Receive BD Initiator Control Registers. */
9836 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9837 0x00000000, 0xffffffff },
9838 { RCVBDI_STD_THRESH, TG3_FL_5705,
9839 0x00000000, 0x000003ff },
9840 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9841 0x00000000, 0xffffffff },
9843 /* Host Coalescing Control Registers. */
9844 { HOSTCC_MODE, TG3_FL_NOT_5705,
9845 0x00000000, 0x00000004 },
9846 { HOSTCC_MODE, TG3_FL_5705,
9847 0x00000000, 0x000000f6 },
9848 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9849 0x00000000, 0xffffffff },
9850 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9851 0x00000000, 0x000003ff },
9852 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9853 0x00000000, 0xffffffff },
9854 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9855 0x00000000, 0x000003ff },
9856 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9857 0x00000000, 0xffffffff },
9858 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9859 0x00000000, 0x000000ff },
9860 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9861 0x00000000, 0xffffffff },
9862 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9863 0x00000000, 0x000000ff },
9864 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9865 0x00000000, 0xffffffff },
9866 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9867 0x00000000, 0xffffffff },
9868 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9869 0x00000000, 0xffffffff },
9870 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9871 0x00000000, 0x000000ff },
9872 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9873 0x00000000, 0xffffffff },
9874 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9875 0x00000000, 0x000000ff },
9876 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9877 0x00000000, 0xffffffff },
9878 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9879 0x00000000, 0xffffffff },
9880 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9881 0x00000000, 0xffffffff },
9882 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9883 0x00000000, 0xffffffff },
9884 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9885 0x00000000, 0xffffffff },
9886 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9887 0xffffffff, 0x00000000 },
9888 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9889 0xffffffff, 0x00000000 },
9891 /* Buffer Manager Control Registers. */
9892 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9893 0x00000000, 0x007fff80 },
9894 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9895 0x00000000, 0x007fffff },
9896 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9897 0x00000000, 0x0000003f },
9898 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9899 0x00000000, 0x000001ff },
9900 { BUFMGR_MB_HIGH_WATER, 0x0000,
9901 0x00000000, 0x000001ff },
9902 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9903 0xffffffff, 0x00000000 },
9904 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9905 0xffffffff, 0x00000000 },
9907 /* Mailbox Registers */
9908 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9909 0x00000000, 0x000001ff },
9910 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9911 0x00000000, 0x000001ff },
9912 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9913 0x00000000, 0x000007ff },
9914 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9915 0x00000000, 0x000001ff },
9917 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9920 is_5705 = is_5750 = 0;
9921 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9923 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9927 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9928 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9931 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9934 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9935 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9938 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9941 offset = (u32) reg_tbl[i].offset;
9942 read_mask = reg_tbl[i].read_mask;
9943 write_mask = reg_tbl[i].write_mask;
9945 /* Save the original register content */
9946 save_val = tr32(offset);
9948 /* Determine the read-only value. */
9949 read_val = save_val & read_mask;
9951 /* Write zero to the register, then make sure the read-only bits
9952 * are not changed and the read/write bits are all zeros.
9958 /* Test the read-only and read/write bits. */
9959 if (((val & read_mask) != read_val) || (val & write_mask))
9962 /* Write ones to all the bits defined by RdMask and WrMask, then
9963 * make sure the read-only bits are not changed and the
9964 * read/write bits are all ones.
9966 tw32(offset, read_mask | write_mask);
9970 /* Test the read-only bits. */
9971 if ((val & read_mask) != read_val)
9974 /* Test the read/write bits. */
9975 if ((val & write_mask) != write_mask)
9978 tw32(offset, save_val);
9984 if (netif_msg_hw(tp))
9985 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9987 tw32(offset, save_val);
9991 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9993 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9997 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9998 for (j = 0; j < len; j += 4) {
10001 tg3_write_mem(tp, offset + j, test_pattern[i]);
10002 tg3_read_mem(tp, offset + j, &val);
10003 if (val != test_pattern[i])
10010 static int tg3_test_memory(struct tg3 *tp)
10012 static struct mem_entry {
10015 } mem_tbl_570x[] = {
10016 { 0x00000000, 0x00b50},
10017 { 0x00002000, 0x1c000},
10018 { 0xffffffff, 0x00000}
10019 }, mem_tbl_5705[] = {
10020 { 0x00000100, 0x0000c},
10021 { 0x00000200, 0x00008},
10022 { 0x00004000, 0x00800},
10023 { 0x00006000, 0x01000},
10024 { 0x00008000, 0x02000},
10025 { 0x00010000, 0x0e000},
10026 { 0xffffffff, 0x00000}
10027 }, mem_tbl_5755[] = {
10028 { 0x00000200, 0x00008},
10029 { 0x00004000, 0x00800},
10030 { 0x00006000, 0x00800},
10031 { 0x00008000, 0x02000},
10032 { 0x00010000, 0x0c000},
10033 { 0xffffffff, 0x00000}
10034 }, mem_tbl_5906[] = {
10035 { 0x00000200, 0x00008},
10036 { 0x00004000, 0x00400},
10037 { 0x00006000, 0x00400},
10038 { 0x00008000, 0x01000},
10039 { 0x00010000, 0x01000},
10040 { 0xffffffff, 0x00000}
10042 struct mem_entry *mem_tbl;
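/* Each table entry names an internal SRAM offset and length to
 * exercise; an offset of 0xffffffff terminates the table.
 */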
10046 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10047 mem_tbl = mem_tbl_5755;
10048 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10049 mem_tbl = mem_tbl_5906;
10050 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10051 mem_tbl = mem_tbl_5705;
10053 mem_tbl = mem_tbl_570x;
10055 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10056 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10057 mem_tbl[i].len)) != 0)
10064 #define TG3_MAC_LOOPBACK 0
10065 #define TG3_PHY_LOOPBACK 1
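/* The loopback test puts the MAC or PHY into internal loopback, builds
 * a frame addressed to the NIC itself, places it on the send ring,
 * kicks the coalescing engine and then verifies that the frame shows
 * up on the receive return ring with its payload intact.
 */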
10067 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10069 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10070 u32 desc_idx, coal_now;
10071 struct sk_buff *skb, *rx_skb;
10074 int num_pkts, tx_len, rx_len, i, err;
10075 struct tg3_rx_buffer_desc *desc;
10076 struct tg3_napi *tnapi, *rnapi;
10077 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10079 tnapi = &tp->napi[0];
10080 rnapi = &tp->napi[0];
10081 coal_now = tnapi->coal_now | rnapi->coal_now;
10083 if (loopback_mode == TG3_MAC_LOOPBACK) {
10084 /* HW errata - mac loopback fails in some cases on 5780.
10085 * Normal traffic and PHY loopback are not affected by
10088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10091 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10092 MAC_MODE_PORT_INT_LPBACK;
10093 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10094 mac_mode |= MAC_MODE_LINK_POLARITY;
10095 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10096 mac_mode |= MAC_MODE_PORT_MODE_MII;
10098 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10099 tw32(MAC_MODE, mac_mode);
10100 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10103 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10104 tg3_phy_fet_toggle_apd(tp, false);
10105 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10107 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10109 tg3_phy_toggle_automdix(tp, 0);
10111 tg3_writephy(tp, MII_BMCR, val);
10114 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10115 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10117 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
10118 mac_mode |= MAC_MODE_PORT_MODE_MII;
10120 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10122 /* reset to prevent losing 1st rx packet intermittently */
10123 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10124 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10126 tw32_f(MAC_RX_MODE, tp->rx_mode);
10128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10129 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10130 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10131 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10132 mac_mode |= MAC_MODE_LINK_POLARITY;
10133 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10134 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10136 tw32(MAC_MODE, mac_mode);
10144 skb = netdev_alloc_skb(tp->dev, tx_len);
10148 tx_data = skb_put(skb, tx_len);
10149 memcpy(tx_data, tp->dev->dev_addr, 6);
10150 memset(tx_data + 6, 0x0, 8);
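/* Test frame layout: the NIC's own MAC address in bytes 0-5, eight
 * zero bytes, then an incrementing byte pattern from offset 14 onward
 * that the receive side checks byte for byte.
 */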
10152 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10154 for (i = 14; i < tx_len; i++)
10155 tx_data[i] = (u8) (i & 0xff);
10157 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10159 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10164 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10168 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10173 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10174 tr32_mailbox(tnapi->prodmbox);
10178 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10179 for (i = 0; i < 25; i++) {
10180 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10185 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10186 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10187 if ((tx_idx == tnapi->tx_prod) &&
10188 (rx_idx == (rx_start_idx + num_pkts)))
10192 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10193 dev_kfree_skb(skb);
10195 if (tx_idx != tnapi->tx_prod)
10198 if (rx_idx != rx_start_idx + num_pkts)
10201 desc = &rnapi->rx_rcb[rx_start_idx];
10202 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10203 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10204 if (opaque_key != RXD_OPAQUE_RING_STD)
10207 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10208 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10211 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10212 if (rx_len != tx_len)
10215 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10217 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10218 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10220 for (i = 14; i < tx_len; i++) {
10221 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10226 /* tg3_free_rings will unmap and free the rx_skb */
10231 #define TG3_MAC_LOOPBACK_FAILED 1
10232 #define TG3_PHY_LOOPBACK_FAILED 2
10233 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10234 TG3_PHY_LOOPBACK_FAILED)
10236 static int tg3_test_loopback(struct tg3 *tp)
10241 if (!netif_running(tp->dev))
10242 return TG3_LOOPBACK_FAILED;
10244 err = tg3_reset_hw(tp, 1);
10246 return TG3_LOOPBACK_FAILED;
10248 /* Turn off gphy autopowerdown. */
10249 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10250 tg3_phy_toggle_apd(tp, false);
10252 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10256 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10258 /* Wait for up to 40 microseconds to acquire lock. */
10259 for (i = 0; i < 4; i++) {
10260 status = tr32(TG3_CPMU_MUTEX_GNT);
10261 if (status == CPMU_MUTEX_GNT_DRIVER)
10266 if (status != CPMU_MUTEX_GNT_DRIVER)
10267 return TG3_LOOPBACK_FAILED;
10269 /* Turn off link-based power management. */
10270 cpmuctrl = tr32(TG3_CPMU_CTRL);
10271 tw32(TG3_CPMU_CTRL,
10272 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10273 CPMU_CTRL_LINK_AWARE_MODE));
10276 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10277 err |= TG3_MAC_LOOPBACK_FAILED;
10279 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10280 tw32(TG3_CPMU_CTRL, cpmuctrl);
10282 /* Release the mutex */
10283 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10286 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10287 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10288 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10289 err |= TG3_PHY_LOOPBACK_FAILED;
10292 /* Re-enable gphy autopowerdown. */
10293 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10294 tg3_phy_toggle_apd(tp, true);
10299 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10302 struct tg3 *tp = netdev_priv(dev);
10304 if (tp->link_config.phy_is_low_power)
10305 tg3_set_power_state(tp, PCI_D0);
10307 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10309 if (tg3_test_nvram(tp) != 0) {
10310 etest->flags |= ETH_TEST_FL_FAILED;
10313 if (tg3_test_link(tp) != 0) {
10314 etest->flags |= ETH_TEST_FL_FAILED;
10317 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10318 int err, err2 = 0, irq_sync = 0;
10320 if (netif_running(dev)) {
10322 tg3_netif_stop(tp);
10326 tg3_full_lock(tp, irq_sync);
10328 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10329 err = tg3_nvram_lock(tp);
10330 tg3_halt_cpu(tp, RX_CPU_BASE);
10331 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10332 tg3_halt_cpu(tp, TX_CPU_BASE);
10334 tg3_nvram_unlock(tp);
10336 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10339 if (tg3_test_registers(tp) != 0) {
10340 etest->flags |= ETH_TEST_FL_FAILED;
10343 if (tg3_test_memory(tp) != 0) {
10344 etest->flags |= ETH_TEST_FL_FAILED;
10347 if ((data[4] = tg3_test_loopback(tp)) != 0)
10348 etest->flags |= ETH_TEST_FL_FAILED;
10350 tg3_full_unlock(tp);
10352 if (tg3_test_interrupt(tp) != 0) {
10353 etest->flags |= ETH_TEST_FL_FAILED;
10357 tg3_full_lock(tp, 0);
10359 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10360 if (netif_running(dev)) {
10361 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10362 err2 = tg3_restart_hw(tp, 1);
10364 tg3_netif_start(tp);
10367 tg3_full_unlock(tp);
10369 if (irq_sync && !err2)
10372 if (tp->link_config.phy_is_low_power)
10373 tg3_set_power_state(tp, PCI_D3hot);
10377 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10379 struct mii_ioctl_data *data = if_mii(ifr);
10380 struct tg3 *tp = netdev_priv(dev);
10383 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10384 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10386 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10391 data->phy_id = PHY_ADDR;
10394 case SIOCGMIIREG: {
10397 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10398 break; /* We have no PHY */
10400 if (tp->link_config.phy_is_low_power)
10403 spin_lock_bh(&tp->lock);
10404 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10405 spin_unlock_bh(&tp->lock);
10407 data->val_out = mii_regval;
10413 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10414 break; /* We have no PHY */
10416 if (!capable(CAP_NET_ADMIN))
10419 if (tp->link_config.phy_is_low_power)
10422 spin_lock_bh(&tp->lock);
10423 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10424 spin_unlock_bh(&tp->lock);
10432 return -EOPNOTSUPP;
10435 #if TG3_VLAN_TAG_USED
10436 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10438 struct tg3 *tp = netdev_priv(dev);
10440 if (!netif_running(dev)) {
10445 tg3_netif_stop(tp);
10447 tg3_full_lock(tp, 0);
10451 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10452 __tg3_set_rx_mode(dev);
10454 tg3_netif_start(tp);
10456 tg3_full_unlock(tp);
10460 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10462 struct tg3 *tp = netdev_priv(dev);
10464 memcpy(ec, &tp->coal, sizeof(*ec));
10468 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10470 struct tg3 *tp = netdev_priv(dev);
10471 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10472 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10474 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10475 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10476 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10477 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10478 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10481 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10482 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10483 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10484 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10485 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10486 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10487 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10488 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10489 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10490 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10493 /* No rx interrupts will be generated if both are zero */
10494 if ((ec->rx_coalesce_usecs == 0) &&
10495 (ec->rx_max_coalesced_frames == 0))
10498 /* No tx interrupts will be generated if both are zero */
10499 if ((ec->tx_coalesce_usecs == 0) &&
10500 (ec->tx_max_coalesced_frames == 0))
10503 /* Only copy relevant parameters, ignore all others. */
10504 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10505 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10506 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10507 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10508 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10509 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10510 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10511 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10512 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10514 if (netif_running(dev)) {
10515 tg3_full_lock(tp, 0);
10516 __tg3_set_coalesce(tp, &tp->coal);
10517 tg3_full_unlock(tp);
10522 static const struct ethtool_ops tg3_ethtool_ops = {
10523 .get_settings = tg3_get_settings,
10524 .set_settings = tg3_set_settings,
10525 .get_drvinfo = tg3_get_drvinfo,
10526 .get_regs_len = tg3_get_regs_len,
10527 .get_regs = tg3_get_regs,
10528 .get_wol = tg3_get_wol,
10529 .set_wol = tg3_set_wol,
10530 .get_msglevel = tg3_get_msglevel,
10531 .set_msglevel = tg3_set_msglevel,
10532 .nway_reset = tg3_nway_reset,
10533 .get_link = ethtool_op_get_link,
10534 .get_eeprom_len = tg3_get_eeprom_len,
10535 .get_eeprom = tg3_get_eeprom,
10536 .set_eeprom = tg3_set_eeprom,
10537 .get_ringparam = tg3_get_ringparam,
10538 .set_ringparam = tg3_set_ringparam,
10539 .get_pauseparam = tg3_get_pauseparam,
10540 .set_pauseparam = tg3_set_pauseparam,
10541 .get_rx_csum = tg3_get_rx_csum,
10542 .set_rx_csum = tg3_set_rx_csum,
10543 .set_tx_csum = tg3_set_tx_csum,
10544 .set_sg = ethtool_op_set_sg,
10545 .set_tso = tg3_set_tso,
10546 .self_test = tg3_self_test,
10547 .get_strings = tg3_get_strings,
10548 .phys_id = tg3_phys_id,
10549 .get_ethtool_stats = tg3_get_ethtool_stats,
10550 .get_coalesce = tg3_get_coalesce,
10551 .set_coalesce = tg3_set_coalesce,
10552 .get_sset_count = tg3_get_sset_count,
10555 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10557 u32 cursize, val, magic;
10559 tp->nvram_size = EEPROM_CHIP_SIZE;
10561 if (tg3_nvram_read(tp, 0, &magic) != 0)
10564 if ((magic != TG3_EEPROM_MAGIC) &&
10565 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10566 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10570 * Size the chip by reading offsets at increasing powers of two.
10571 * When we encounter our validation signature, we know the addressing
10572 * has wrapped around, and thus have our chip size.
10576 while (cursize < tp->nvram_size) {
10577 if (tg3_nvram_read(tp, cursize, &val) != 0)
10586 tp->nvram_size = cursize;
10589 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10593 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10594 tg3_nvram_read(tp, 0, &val) != 0)
10597 /* Selfboot format */
10598 if (val != TG3_EEPROM_MAGIC) {
10599 tg3_get_eeprom_size(tp);
10603 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10605 /* This is confusing. We want to operate on the
10606 * 16-bit value at offset 0xf2. The tg3_nvram_read()
10607 * call will read from NVRAM and byteswap the data
10608 * according to the byteswapping settings for all
10609 * other register accesses. This ensures the data we
10610 * want will always reside in the lower 16-bits.
10611 * However, the data in NVRAM is in LE format, which
10612 * means the data from the NVRAM read will always be
10613 * opposite the endianness of the CPU. The 16-bit
10614 * byteswap then brings the data to CPU endianness.
10616 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
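/* i.e. the swapped 16-bit value above is the size in kilobytes; a
 * stored 512 means a 512 KB part.
 */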
10620 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10623 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10627 nvcfg1 = tr32(NVRAM_CFG1);
10628 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10629 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10631 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10632 tw32(NVRAM_CFG1, nvcfg1);
10635 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10636 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10637 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10638 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10639 tp->nvram_jedecnum = JEDEC_ATMEL;
10640 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10641 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10643 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10644 tp->nvram_jedecnum = JEDEC_ATMEL;
10645 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10647 case FLASH_VENDOR_ATMEL_EEPROM:
10648 tp->nvram_jedecnum = JEDEC_ATMEL;
10649 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10650 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10652 case FLASH_VENDOR_ST:
10653 tp->nvram_jedecnum = JEDEC_ST;
10654 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10655 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10657 case FLASH_VENDOR_SAIFUN:
10658 tp->nvram_jedecnum = JEDEC_SAIFUN;
10659 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10661 case FLASH_VENDOR_SST_SMALL:
10662 case FLASH_VENDOR_SST_LARGE:
10663 tp->nvram_jedecnum = JEDEC_SST;
10664 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10668 tp->nvram_jedecnum = JEDEC_ATMEL;
10669 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10670 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10674 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10678 nvcfg1 = tr32(NVRAM_CFG1);
10680 /* NVRAM protection for TPM */
10681 if (nvcfg1 & (1 << 27))
10682 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10684 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10685 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10686 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10687 tp->nvram_jedecnum = JEDEC_ATMEL;
10688 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10690 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10691 tp->nvram_jedecnum = JEDEC_ATMEL;
10692 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10693 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10695 case FLASH_5752VENDOR_ST_M45PE10:
10696 case FLASH_5752VENDOR_ST_M45PE20:
10697 case FLASH_5752VENDOR_ST_M45PE40:
10698 tp->nvram_jedecnum = JEDEC_ST;
10699 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10700 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10704 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10705 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10706 case FLASH_5752PAGE_SIZE_256:
10707 tp->nvram_pagesize = 256;
10709 case FLASH_5752PAGE_SIZE_512:
10710 tp->nvram_pagesize = 512;
10712 case FLASH_5752PAGE_SIZE_1K:
10713 tp->nvram_pagesize = 1024;
10715 case FLASH_5752PAGE_SIZE_2K:
10716 tp->nvram_pagesize = 2048;
10718 case FLASH_5752PAGE_SIZE_4K:
10719 tp->nvram_pagesize = 4096;
10721 case FLASH_5752PAGE_SIZE_264:
10722 tp->nvram_pagesize = 264;
10726 /* For eeprom, set pagesize to maximum eeprom size */
10727 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10729 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10730 tw32(NVRAM_CFG1, nvcfg1);
10734 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10736 u32 nvcfg1, protect = 0;
10738 nvcfg1 = tr32(NVRAM_CFG1);
10740 /* NVRAM protection for TPM */
10741 if (nvcfg1 & (1 << 27)) {
10742 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10746 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10748 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10749 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10750 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10751 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10752 tp->nvram_jedecnum = JEDEC_ATMEL;
10753 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10754 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10755 tp->nvram_pagesize = 264;
10756 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10757 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10758 tp->nvram_size = (protect ? 0x3e200 :
10759 TG3_NVRAM_SIZE_512KB);
10760 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10761 tp->nvram_size = (protect ? 0x1f200 :
10762 TG3_NVRAM_SIZE_256KB);
10764 tp->nvram_size = (protect ? 0x1f200 :
10765 TG3_NVRAM_SIZE_128KB);
10767 case FLASH_5752VENDOR_ST_M45PE10:
10768 case FLASH_5752VENDOR_ST_M45PE20:
10769 case FLASH_5752VENDOR_ST_M45PE40:
10770 tp->nvram_jedecnum = JEDEC_ST;
10771 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10772 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10773 tp->nvram_pagesize = 256;
10774 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10775 tp->nvram_size = (protect ?
10776 TG3_NVRAM_SIZE_64KB :
10777 TG3_NVRAM_SIZE_128KB);
10778 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10779 tp->nvram_size = (protect ?
10780 TG3_NVRAM_SIZE_64KB :
10781 TG3_NVRAM_SIZE_256KB);
10783 tp->nvram_size = (protect ?
10784 TG3_NVRAM_SIZE_128KB :
10785 TG3_NVRAM_SIZE_512KB);
10790 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10794 nvcfg1 = tr32(NVRAM_CFG1);
10796 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10797 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10798 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10799 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10800 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10801 tp->nvram_jedecnum = JEDEC_ATMEL;
10802 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10803 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10805 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10806 tw32(NVRAM_CFG1, nvcfg1);
10808 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10809 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10810 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10811 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10812 tp->nvram_jedecnum = JEDEC_ATMEL;
10813 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10814 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10815 tp->nvram_pagesize = 264;
10817 case FLASH_5752VENDOR_ST_M45PE10:
10818 case FLASH_5752VENDOR_ST_M45PE20:
10819 case FLASH_5752VENDOR_ST_M45PE40:
10820 tp->nvram_jedecnum = JEDEC_ST;
10821 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10822 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10823 tp->nvram_pagesize = 256;
10828 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10830 u32 nvcfg1, protect = 0;
10832 nvcfg1 = tr32(NVRAM_CFG1);
10834 /* NVRAM protection for TPM */
10835 if (nvcfg1 & (1 << 27)) {
10836 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10840 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10842 case FLASH_5761VENDOR_ATMEL_ADB021D:
10843 case FLASH_5761VENDOR_ATMEL_ADB041D:
10844 case FLASH_5761VENDOR_ATMEL_ADB081D:
10845 case FLASH_5761VENDOR_ATMEL_ADB161D:
10846 case FLASH_5761VENDOR_ATMEL_MDB021D:
10847 case FLASH_5761VENDOR_ATMEL_MDB041D:
10848 case FLASH_5761VENDOR_ATMEL_MDB081D:
10849 case FLASH_5761VENDOR_ATMEL_MDB161D:
10850 tp->nvram_jedecnum = JEDEC_ATMEL;
10851 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10852 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10853 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10854 tp->nvram_pagesize = 256;
10856 case FLASH_5761VENDOR_ST_A_M45PE20:
10857 case FLASH_5761VENDOR_ST_A_M45PE40:
10858 case FLASH_5761VENDOR_ST_A_M45PE80:
10859 case FLASH_5761VENDOR_ST_A_M45PE16:
10860 case FLASH_5761VENDOR_ST_M_M45PE20:
10861 case FLASH_5761VENDOR_ST_M_M45PE40:
10862 case FLASH_5761VENDOR_ST_M_M45PE80:
10863 case FLASH_5761VENDOR_ST_M_M45PE16:
10864 tp->nvram_jedecnum = JEDEC_ST;
10865 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10866 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10867 tp->nvram_pagesize = 256;
10872 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10875 case FLASH_5761VENDOR_ATMEL_ADB161D:
10876 case FLASH_5761VENDOR_ATMEL_MDB161D:
10877 case FLASH_5761VENDOR_ST_A_M45PE16:
10878 case FLASH_5761VENDOR_ST_M_M45PE16:
10879 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10881 case FLASH_5761VENDOR_ATMEL_ADB081D:
10882 case FLASH_5761VENDOR_ATMEL_MDB081D:
10883 case FLASH_5761VENDOR_ST_A_M45PE80:
10884 case FLASH_5761VENDOR_ST_M_M45PE80:
10885 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10887 case FLASH_5761VENDOR_ATMEL_ADB041D:
10888 case FLASH_5761VENDOR_ATMEL_MDB041D:
10889 case FLASH_5761VENDOR_ST_A_M45PE40:
10890 case FLASH_5761VENDOR_ST_M_M45PE40:
10891 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10893 case FLASH_5761VENDOR_ATMEL_ADB021D:
10894 case FLASH_5761VENDOR_ATMEL_MDB021D:
10895 case FLASH_5761VENDOR_ST_A_M45PE20:
10896 case FLASH_5761VENDOR_ST_M_M45PE20:
10897 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10903 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10905 tp->nvram_jedecnum = JEDEC_ATMEL;
10906 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10907 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10910 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
10914 nvcfg1 = tr32(NVRAM_CFG1);
10916 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10917 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10918 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10919 tp->nvram_jedecnum = JEDEC_ATMEL;
10920 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10921 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10923 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10924 tw32(NVRAM_CFG1, nvcfg1);
10926 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10927 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10928 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10929 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10930 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10931 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10932 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10933 tp->nvram_jedecnum = JEDEC_ATMEL;
10934 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10935 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10937 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10938 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10939 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10940 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10941 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10943 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10944 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10945 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10947 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10948 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10949 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10953 case FLASH_5752VENDOR_ST_M45PE10:
10954 case FLASH_5752VENDOR_ST_M45PE20:
10955 case FLASH_5752VENDOR_ST_M45PE40:
10956 tp->nvram_jedecnum = JEDEC_ST;
10957 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10958 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10960 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10961 case FLASH_5752VENDOR_ST_M45PE10:
10962 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10964 case FLASH_5752VENDOR_ST_M45PE20:
10965 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10967 case FLASH_5752VENDOR_ST_M45PE40:
10968 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10973 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
10977 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10978 case FLASH_5752PAGE_SIZE_256:
10979 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10980 tp->nvram_pagesize = 256;
10982 case FLASH_5752PAGE_SIZE_512:
10983 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10984 tp->nvram_pagesize = 512;
10986 case FLASH_5752PAGE_SIZE_1K:
10987 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10988 tp->nvram_pagesize = 1024;
10990 case FLASH_5752PAGE_SIZE_2K:
10991 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10992 tp->nvram_pagesize = 2048;
10994 case FLASH_5752PAGE_SIZE_4K:
10995 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10996 tp->nvram_pagesize = 4096;
10998 case FLASH_5752PAGE_SIZE_264:
10999 tp->nvram_pagesize = 264;
11001 case FLASH_5752PAGE_SIZE_528:
11002 tp->nvram_pagesize = 528;
11007 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11008 static void __devinit tg3_nvram_init(struct tg3 *tp)
11010 tw32_f(GRC_EEPROM_ADDR,
11011 (EEPROM_ADDR_FSM_RESET |
11012 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11013 EEPROM_ADDR_CLKPERD_SHIFT)));
11017 /* Enable seeprom accesses. */
11018 tw32_f(GRC_LOCAL_CTRL,
11019 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11022 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11023 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11024 tp->tg3_flags |= TG3_FLAG_NVRAM;
11026 if (tg3_nvram_lock(tp)) {
11027 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
11028 "tg3_nvram_init failed.\n", tp->dev->name);
11031 tg3_enable_nvram_access(tp);
11033 tp->nvram_size = 0;
11035 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11036 tg3_get_5752_nvram_info(tp);
11037 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11038 tg3_get_5755_nvram_info(tp);
11039 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11041 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11042 tg3_get_5787_nvram_info(tp);
11043 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11044 tg3_get_5761_nvram_info(tp);
11045 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11046 tg3_get_5906_nvram_info(tp);
11047 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11048 tg3_get_57780_nvram_info(tp);
11050 tg3_get_nvram_info(tp);
11052 if (tp->nvram_size == 0)
11053 tg3_get_nvram_size(tp);
11055 tg3_disable_nvram_access(tp);
11056 tg3_nvram_unlock(tp);
11059 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11061 tg3_get_eeprom_size(tp);
11065 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11066 u32 offset, u32 len, u8 *buf)
11071 for (i = 0; i < len; i += 4) {
11077 memcpy(&data, buf + i, 4);
11080 * The SEEPROM interface expects the data to always be opposite
11081 * the native endian format. We accomplish this by reversing
11082 * all the operations that would have been performed on the
11083 * data from a call to tg3_nvram_read_be32().
11085 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11087 val = tr32(GRC_EEPROM_ADDR);
11088 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11090 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11092 tw32(GRC_EEPROM_ADDR, val |
11093 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11094 (addr & EEPROM_ADDR_ADDR_MASK) |
11095 EEPROM_ADDR_START |
11096 EEPROM_ADDR_WRITE);
11098 for (j = 0; j < 1000; j++) {
11099 val = tr32(GRC_EEPROM_ADDR);
11101 if (val & EEPROM_ADDR_COMPLETE)
11105 if (!(val & EEPROM_ADDR_COMPLETE)) {
11114 /* offset and length are dword aligned */
11115 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11119 u32 pagesize = tp->nvram_pagesize;
11120 u32 pagemask = pagesize - 1;
11124 tmp = kmalloc(pagesize, GFP_KERNEL);
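/* Unbuffered flash can only be programmed a full page at a time, so
 * each affected page is read into the scratch buffer, patched with the
 * caller's data, erased and then rewritten word by word.
 */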
11130 u32 phy_addr, page_off, size;
11132 phy_addr = offset & ~pagemask;
11134 for (j = 0; j < pagesize; j += 4) {
11135 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11136 (__be32 *) (tmp + j));
11143 page_off = offset & pagemask;
11150 memcpy(tmp + page_off, buf, size);
11152 offset = offset + (pagesize - page_off);
11154 tg3_enable_nvram_access(tp);
11157 * Before we can erase the flash page, we need
11158 * to issue a special "write enable" command.
11160 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11162 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11165 /* Erase the target page */
11166 tw32(NVRAM_ADDR, phy_addr);
11168 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11169 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11171 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11174 /* Issue another write enable to start the write. */
11175 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11177 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11180 for (j = 0; j < pagesize; j += 4) {
11183 data = *((__be32 *) (tmp + j));
11185 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11187 tw32(NVRAM_ADDR, phy_addr + j);
11189 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11193 nvram_cmd |= NVRAM_CMD_FIRST;
11194 else if (j == (pagesize - 4))
11195 nvram_cmd |= NVRAM_CMD_LAST;
11197 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11204 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11205 tg3_nvram_exec_cmd(tp, nvram_cmd);
11212 /* offset and length are dword aligned */
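/* Buffered flash and EEPROM parts accept word-at-a-time writes; the
 * NVRAM_CMD_FIRST/LAST flags bracket each page (or each word for
 * EEPROM), and ST parts get an explicit write-enable command before
 * the first word of every page.
 */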
11213 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11218 for (i = 0; i < len; i += 4, offset += 4) {
11219 u32 page_off, phy_addr, nvram_cmd;
11222 memcpy(&data, buf + i, 4);
11223 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11225 page_off = offset % tp->nvram_pagesize;
11227 phy_addr = tg3_nvram_phys_addr(tp, offset);
11229 tw32(NVRAM_ADDR, phy_addr);
11231 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11233 if ((page_off == 0) || (i == 0))
11234 nvram_cmd |= NVRAM_CMD_FIRST;
11235 if (page_off == (tp->nvram_pagesize - 4))
11236 nvram_cmd |= NVRAM_CMD_LAST;
11238 if (i == (len - 4))
11239 nvram_cmd |= NVRAM_CMD_LAST;
11241 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11242 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11243 (tp->nvram_jedecnum == JEDEC_ST) &&
11244 (nvram_cmd & NVRAM_CMD_FIRST)) {
11246 if ((ret = tg3_nvram_exec_cmd(tp,
11247 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11252 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11253 /* We always do complete word writes to eeprom. */
11254 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11257 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11263 /* offset and length are dword aligned */
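/* Top-level NVRAM write: temporarily drop the board's EEPROM write
 * protection if it is asserted, then dispatch to the SEEPROM, buffered
 * flash or unbuffered flash writer and restore protection afterwards.
 */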
11264 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11268 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11269 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11270 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11274 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11275 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11280 ret = tg3_nvram_lock(tp);
11284 tg3_enable_nvram_access(tp);
11285 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11286 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11287 tw32(NVRAM_WRITE1, 0x406);
11289 grc_mode = tr32(GRC_MODE);
11290 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11292 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11293 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11295 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11299 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11303 grc_mode = tr32(GRC_MODE);
11304 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11306 tg3_disable_nvram_access(tp);
11307 tg3_nvram_unlock(tp);
11310 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11311 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11318 struct subsys_tbl_ent {
11319 u16 subsys_vendor, subsys_devid;
11323 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11324 /* Broadcom boards. */
11325 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11326 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11327 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11328 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11329 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11330 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11331 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11332 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11333 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11334 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11335 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11338 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11339 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11340 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11341 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11342 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11345 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11346 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11347 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11348 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11350 /* Compaq boards. */
11351 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11352 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11353 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11354 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11355 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11358 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11361 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11365 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11366 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11367 tp->pdev->subsystem_vendor) &&
11368 (subsys_id_to_phy_id[i].subsys_devid ==
11369 tp->pdev->subsystem_device))
11370 return &subsys_id_to_phy_id[i];
11375 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11380 /* On some early chips the SRAM cannot be accessed in D3hot state,
11381 * so we need to make sure we're in D0.
11383 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11384 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11385 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11388 /* Make sure register accesses (indirect or otherwise)
11389 * will function correctly.
11391 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11392 tp->misc_host_ctrl);
11394 /* The memory arbiter has to be enabled in order for SRAM accesses
11395 * to succeed. Normally on powerup the tg3 chip firmware will make
11396 * sure it is enabled, but other entities such as system netboot
11397 * code might disable it.
11399 val = tr32(MEMARB_MODE);
11400 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11402 tp->phy_id = PHY_ID_INVALID;
11403 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11405 /* Assume an onboard device and WOL capable by default. */
11406 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11409 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11410 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11411 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
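/* The 5906 exposes its ASPM and WOL configuration through the VCPU
 * shadow register rather than the NVRAM-backed config words. */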
11413 val = tr32(VCPU_CFGSHDW);
11414 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11415 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11416 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11417 (val & VCPU_CFGSHDW_WOL_MAGPKT))
11418 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11422 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11423 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11424 u32 nic_cfg, led_cfg;
11425 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11426 int eeprom_phy_serdes = 0;
11428 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11429 tp->nic_sram_data_cfg = nic_cfg;
11431 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11432 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11433 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11434 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11435 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11436 (ver > 0) && (ver < 0x100))
11437 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11440 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11442 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11443 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11444 eeprom_phy_serdes = 1;
11446 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11447 if (nic_phy_id != 0) {
11448 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11449 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
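/* Repack the two NIC_SRAM_DATA_PHY_ID words into the driver's
 * internal PHY_ID format. */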
11451 eeprom_phy_id = (id1 >> 16) << 10;
11452 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11453 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11457 tp->phy_id = eeprom_phy_id;
11458 if (eeprom_phy_serdes) {
11459 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11460 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11462 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11465 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11466 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11467 SHASTA_EXT_LED_MODE_MASK);
11469 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11473 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11474 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11477 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11478 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11481 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11482 tp->led_ctrl = LED_CTRL_MODE_MAC;
11484 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11485 * read from some older 5700/5701 bootcode.
11487 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11489 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11491 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11495 case SHASTA_EXT_LED_SHARED:
11496 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11497 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11498 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11499 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11500 LED_CTRL_MODE_PHY_2);
11503 case SHASTA_EXT_LED_MAC:
11504 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11507 case SHASTA_EXT_LED_COMBO:
11508 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11509 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11510 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11511 LED_CTRL_MODE_PHY_2);
11516 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11518 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11519 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11521 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11522 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11524 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11525 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11526 if ((tp->pdev->subsystem_vendor ==
11527 PCI_VENDOR_ID_ARIMA) &&
11528 (tp->pdev->subsystem_device == 0x205a ||
11529 tp->pdev->subsystem_device == 0x2063))
11530 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11532 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11533 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11536 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11537 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11538 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11539 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11542 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11543 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11544 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11546 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11547 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11548 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11550 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11551 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11552 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11554 if (cfg2 & (1 << 17))
11555 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11557 /* SerDes signal pre-emphasis in register 0x590 is set by the
11558 * bootcode if bit 18 is set. */
11559 if (cfg2 & (1 << 18))
11560 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11562 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11563 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
11564 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11565 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11567 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11570 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11571 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11572 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11575 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11576 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11577 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11578 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11579 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11580 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11583 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11584 device_set_wakeup_enable(&tp->pdev->dev,
11585 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11588 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11593 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11594 tw32(OTP_CTRL, cmd);
11596 /* Wait for up to 1 ms for command to execute. */
11597 for (i = 0; i < 100; i++) {
11598 val = tr32(OTP_STATUS);
11599 if (val & OTP_STATUS_CMD_DONE)
11604 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11607 /* Read the gphy configuration from the OTP region of the chip. The gphy
11608 * configuration is a 32-bit value that straddles the alignment boundary.
11609 * We do two 32-bit reads and then shift and merge the results.
11611 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11613 u32 bhalf_otp, thalf_otp;
11615 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11617 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11620 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11622 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11625 thalf_otp = tr32(OTP_READ_DATA);
11627 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11629 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11632 bhalf_otp = tr32(OTP_READ_DATA);
11634 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11637 static int __devinit tg3_phy_probe(struct tg3 *tp)
11639 u32 hw_phy_id_1, hw_phy_id_2;
11640 u32 hw_phy_id, hw_phy_id_masked;
11643 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11644 return tg3_phy_init(tp);
11646 /* Reading the PHY ID register can conflict with ASF
11647 * firmware access to the PHY hardware.
11650 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11651 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11652 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11654 /* Now read the physical PHY_ID from the chip and verify
11655 * that it is sane. If it doesn't look good, we fall back
11656 * to either the hard-coded, table-based PHY_ID or, failing
11657 * that, the value found in the eeprom area.
11659 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11660 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
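/* Assemble MII_PHYSID1/MII_PHYSID2 into the driver's internal PHY_ID format. */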
11662 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11663 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11664 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11666 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11669 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11670 tp->phy_id = hw_phy_id;
11671 if (hw_phy_id_masked == PHY_ID_BCM8002)
11672 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11674 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11676 if (tp->phy_id != PHY_ID_INVALID) {
11677 /* Do nothing, phy ID already set up in
11678 * tg3_get_eeprom_hw_cfg().
11681 struct subsys_tbl_ent *p;
11683 /* No eeprom signature? Try the hardcoded
11684 * subsys device table.
11686 p = lookup_by_subsys(tp);
11690 tp->phy_id = p->phy_id;
11692 tp->phy_id == PHY_ID_BCM8002)
11693 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11697 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11698 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11699 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11700 u32 bmsr, adv_reg, tg3_ctrl, mask;
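/* The BMSR link-status bit is latched, so read the register twice;
 * the second read reflects the current link state. */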
11702 tg3_readphy(tp, MII_BMSR, &bmsr);
11703 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11704 (bmsr & BMSR_LSTATUS))
11705 goto skip_phy_reset;
11707 err = tg3_phy_reset(tp);
11711 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11712 ADVERTISE_100HALF | ADVERTISE_100FULL |
11713 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11715 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11716 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11717 MII_TG3_CTRL_ADV_1000_FULL);
11718 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11719 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11720 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11721 MII_TG3_CTRL_ENABLE_AS_MASTER);
11724 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11725 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11726 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11727 if (!tg3_copper_is_advertising_all(tp, mask)) {
11728 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11730 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11731 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11733 tg3_writephy(tp, MII_BMCR,
11734 BMCR_ANENABLE | BMCR_ANRESTART);
11736 tg3_phy_set_wirespeed(tp);
11738 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11739 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11740 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11744 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11745 err = tg3_init_5401phy_dsp(tp);
11750 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11751 err = tg3_init_5401phy_dsp(tp);
11754 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11755 tp->link_config.advertising =
11756 (ADVERTISED_1000baseT_Half |
11757 ADVERTISED_1000baseT_Full |
11758 ADVERTISED_Autoneg |
11760 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11761 tp->link_config.advertising &=
11762 ~(ADVERTISED_1000baseT_Half |
11763 ADVERTISED_1000baseT_Full);
11768 static void __devinit tg3_read_partno(struct tg3 *tp)
11770 unsigned char vpd_data[256]; /* in little-endian format */
11774 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11775 tg3_nvram_read(tp, 0x0, &magic))
11776 goto out_not_found;
11778 if (magic == TG3_EEPROM_MAGIC) {
11779 for (i = 0; i < 256; i += 4) {
11782 /* The data is in little-endian format in NVRAM.
11783 * Use the big-endian read routines to preserve
11784 * the byte order as it exists in NVRAM.
11786 if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
11787 goto out_not_found;
11789 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
11794 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11795 for (i = 0; i < 256; i += 4) {
11800 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11802 while (j++ < 100) {
11803 pci_read_config_word(tp->pdev, vpd_cap +
11804 PCI_VPD_ADDR, &tmp16);
11805 if (tmp16 & 0x8000)
11809 if (!(tmp16 & 0x8000))
11810 goto out_not_found;
11812 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11814 v = cpu_to_le32(tmp);
11815 memcpy(&vpd_data[i], &v, sizeof(v));
11819 /* Now parse and find the part number. */
11820 for (i = 0; i < 254; ) {
11821 unsigned char val = vpd_data[i];
11822 unsigned int block_end;
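/* In the PCI VPD structure, 0x82 is the identifier-string tag and 0x91 the
 * read/write section; skip those and search the read-only section for the
 * part number. */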
11824 if (val == 0x82 || val == 0x91) {
11827 (vpd_data[i + 2] << 8)));
11832 goto out_not_found;
11834 block_end = (i + 3 +
11836 (vpd_data[i + 2] << 8)));
11839 if (block_end > 256)
11840 goto out_not_found;
11842 while (i < (block_end - 2)) {
11843 if (vpd_data[i + 0] == 'P' &&
11844 vpd_data[i + 1] == 'N') {
11845 int partno_len = vpd_data[i + 2];
11848 if (partno_len > 24 || (partno_len + i) > 256)
11849 goto out_not_found;
11851 memcpy(tp->board_part_number,
11852 &vpd_data[i], partno_len);
11857 i += 3 + vpd_data[i + 2];
11860 /* Part number not found. */
11861 goto out_not_found;
11865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11866 strcpy(tp->board_part_number, "BCM95906");
11867 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11868 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
11869 strcpy(tp->board_part_number, "BCM57780");
11870 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11871 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
11872 strcpy(tp->board_part_number, "BCM57760");
11873 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11874 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
11875 strcpy(tp->board_part_number, "BCM57790");
11876 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11877 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
11878 strcpy(tp->board_part_number, "BCM57788");
11880 strcpy(tp->board_part_number, "none");
11883 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11887 if (tg3_nvram_read(tp, offset, &val) ||
11888 (val & 0xfc000000) != 0x0c000000 ||
11889 tg3_nvram_read(tp, offset + 4, &val) ||
11896 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
11898 u32 val, offset, start, ver_offset;
11900 bool newver = false;
11902 if (tg3_nvram_read(tp, 0xc, &offset) ||
11903 tg3_nvram_read(tp, 0x4, &start))
11906 offset = tg3_nvram_logical_addr(tp, offset);
11908 if (tg3_nvram_read(tp, offset, &val))
11911 if ((val & 0xfc000000) == 0x0c000000) {
11912 if (tg3_nvram_read(tp, offset + 4, &val))
11920 if (tg3_nvram_read(tp, offset + 8, &ver_offset))
11923 offset = offset + ver_offset - start;
11924 for (i = 0; i < 16; i += 4) {
11926 if (tg3_nvram_read_be32(tp, offset + i, &v))
11929 memcpy(tp->fw_ver + i, &v, sizeof(v));
11934 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
11937 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
11938 TG3_NVM_BCVER_MAJSFT;
11939 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
11940 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
11944 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
11946 u32 val, major, minor;
11948 /* Use native endian representation */
11949 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
11952 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
11953 TG3_NVM_HWSB_CFG1_MAJSFT;
11954 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
11955 TG3_NVM_HWSB_CFG1_MINSFT;
11957 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
11960 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
11962 u32 offset, major, minor, build;
11964 tp->fw_ver[0] = 's';
11965 tp->fw_ver[1] = 'b';
11966 tp->fw_ver[2] = '\0';
11968 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
11971 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
11972 case TG3_EEPROM_SB_REVISION_0:
11973 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
11975 case TG3_EEPROM_SB_REVISION_2:
11976 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
11978 case TG3_EEPROM_SB_REVISION_3:
11979 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
11985 if (tg3_nvram_read(tp, offset, &val))
11988 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
11989 TG3_EEPROM_SB_EDH_BLD_SHFT;
11990 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
11991 TG3_EEPROM_SB_EDH_MAJ_SHFT;
11992 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
11994 if (minor > 99 || build > 26)
11997 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
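/* Append the build number as a letter suffix: build 1 -> 'a', 2 -> 'b', ... */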
12000 tp->fw_ver[8] = 'a' + build - 1;
12001 tp->fw_ver[9] = '\0';
12005 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12007 u32 val, offset, start;
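/* Walk the NVRAM directory entries looking for the ASF initialization image. */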
12010 for (offset = TG3_NVM_DIR_START;
12011 offset < TG3_NVM_DIR_END;
12012 offset += TG3_NVM_DIRENT_SIZE) {
12013 if (tg3_nvram_read(tp, offset, &val))
12016 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12020 if (offset == TG3_NVM_DIR_END)
12023 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12024 start = 0x08000000;
12025 else if (tg3_nvram_read(tp, offset - 4, &start))
12028 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12029 !tg3_fw_img_is_valid(tp, offset) ||
12030 tg3_nvram_read(tp, offset + 8, &val))
12033 offset += val - start;
12035 vlen = strlen(tp->fw_ver);
12037 tp->fw_ver[vlen++] = ',';
12038 tp->fw_ver[vlen++] = ' ';
12040 for (i = 0; i < 4; i++) {
12042 if (tg3_nvram_read_be32(tp, offset, &v))
12045 offset += sizeof(v);
12047 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12048 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12052 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12057 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12062 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12063 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12066 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12067 if (apedata != APE_SEG_SIG_MAGIC)
12070 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12071 if (!(apedata & APE_FW_STATUS_READY))
12074 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12076 vlen = strlen(tp->fw_ver);
12078 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12079 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12080 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12081 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12082 (apedata & APE_FW_VERSION_BLDMSK));
12085 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12089 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12090 tp->fw_ver[0] = 's';
12091 tp->fw_ver[1] = 'b';
12092 tp->fw_ver[2] = '\0';
12097 if (tg3_nvram_read(tp, 0, &val))
12100 if (val == TG3_EEPROM_MAGIC)
12101 tg3_read_bc_ver(tp);
12102 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12103 tg3_read_sb_ver(tp, val);
12104 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12105 tg3_read_hwsb_ver(tp);
12109 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12110 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
12113 tg3_read_mgmtfw_ver(tp);
12115 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12118 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12120 static int __devinit tg3_get_invariants(struct tg3 *tp)
12122 static struct pci_device_id write_reorder_chipsets[] = {
12123 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12124 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12125 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12126 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12127 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12128 PCI_DEVICE_ID_VIA_8385_0) },
12132 u32 pci_state_reg, grc_misc_cfg;
12137 /* Force memory write invalidate off. If we leave it on,
12138 * then on 5700_BX chips we have to enable a workaround.
12139 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12140 * to match the cacheline size. The Broadcom driver has this
12141 * workaround but turns MWI off all the time, so it never uses
12142 * it. This seems to suggest that the workaround is insufficient.
12144 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12145 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12146 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12148 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12149 * has the register indirect write enable bit set before
12150 * we try to access any of the MMIO registers. It is also
12151 * critical that the PCI-X hw workaround situation is decided
12152 * before that point.
12154 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12157 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12158 MISC_HOST_CTRL_CHIPREV_SHIFT);
12159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12160 u32 prod_id_asic_rev;
12162 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12163 &prod_id_asic_rev);
12164 tp->pci_chip_rev_id = prod_id_asic_rev;
12167 /* Wrong chip ID in 5752 A0. This code can be removed later
12168 * as A0 is not in production.
12170 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12171 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12173 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12174 * we need to disable memory and use config. cycles
12175 * only to access all registers. The 5702/03 chips
12176 * can mistakenly decode the special cycles from the
12177 * ICH chipsets as memory write cycles, causing corruption
12178 * of register and memory space. Only certain ICH bridges
12179 * will drive special cycles with non-zero data during the
12180 * address phase which can fall within the 5703's address
12181 * range. This is not an ICH bug as the PCI spec allows
12182 * non-zero address during special cycles. However, only
12183 * these ICH bridges are known to drive non-zero addresses
12184 * during special cycles.
12186 * Since special cycles do not cross PCI bridges, we only
12187 * enable this workaround if the 5703 is on the secondary
12188 * bus of these ICH bridges.
12190 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12191 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12192 static struct tg3_dev_id {
12196 } ich_chipsets[] = {
12197 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12199 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12201 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12203 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12207 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12208 struct pci_dev *bridge = NULL;
12210 while (pci_id->vendor != 0) {
12211 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12217 if (pci_id->rev != PCI_ANY_ID) {
12218 if (bridge->revision > pci_id->rev)
12221 if (bridge->subordinate &&
12222 (bridge->subordinate->number ==
12223 tp->pdev->bus->number)) {
12225 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12226 pci_dev_put(bridge);
12232 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12233 static struct tg3_dev_id {
12236 } bridge_chipsets[] = {
12237 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12238 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12241 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12242 struct pci_dev *bridge = NULL;
12244 while (pci_id->vendor != 0) {
12245 bridge = pci_get_device(pci_id->vendor,
12252 if (bridge->subordinate &&
12253 (bridge->subordinate->number <=
12254 tp->pdev->bus->number) &&
12255 (bridge->subordinate->subordinate >=
12256 tp->pdev->bus->number)) {
12257 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12258 pci_dev_put(bridge);
12264 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12265 * DMA addresses > 40-bit. This bridge may have additional
12266 * 57xx devices behind it in some 4-port NIC designs, for example.
12267 * Any tg3 device found behind the bridge will also need the 40-bit DMA workaround.
12270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12271 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12272 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12273 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12274 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12277 struct pci_dev *bridge = NULL;
12280 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12281 PCI_DEVICE_ID_SERVERWORKS_EPB,
12283 if (bridge && bridge->subordinate &&
12284 (bridge->subordinate->number <=
12285 tp->pdev->bus->number) &&
12286 (bridge->subordinate->subordinate >=
12287 tp->pdev->bus->number)) {
12288 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12289 pci_dev_put(bridge);
12295 /* Initialize misc host control in PCI block. */
12296 tp->misc_host_ctrl |= (misc_ctrl_reg &
12297 MISC_HOST_CTRL_CHIPREV);
12298 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12299 tp->misc_host_ctrl);
12301 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12302 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12303 tp->pdev_peer = tg3_find_peer(tp);
12305 /* Intentionally exclude ASIC_REV_5906 */
12306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12307 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12308 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12309 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12312 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12314 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12315 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12316 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12317 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12318 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12319 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12321 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12322 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12323 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12325 /* 5700 B0 chips do not support checksumming correctly due
12326 * to hardware bugs.
12328 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12329 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12331 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12332 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12333 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12334 tp->dev->features |= NETIF_F_IPV6_CSUM;
12337 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12338 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12339 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12340 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12341 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12342 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12343 tp->pdev_peer == tp->pdev))
12344 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12346 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12348 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12349 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12351 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12352 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12354 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12355 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12361 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12362 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12363 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
12365 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12368 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12369 if (tp->pcie_cap != 0) {
12372 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12374 pcie_set_readrq(tp->pdev, 4096);
12376 pci_read_config_word(tp->pdev,
12377 tp->pcie_cap + PCI_EXP_LNKCTL,
12379 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12381 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12383 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12384 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
12385 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
12386 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12388 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12389 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12390 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12391 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12392 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12393 if (!tp->pcix_cap) {
12394 printk(KERN_ERR PFX "Cannot find PCI-X "
12395 "capability, aborting.\n");
12399 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12400 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12403 /* If we have an AMD 762 or VIA K8T800 chipset, write
12404 * reordering to the mailbox registers done by the host
12405 * controller can cause major troubles. We read back from
12406 * every mailbox register write to force the writes to be
12407 * posted to the chip in order.
12409 if (pci_dev_present(write_reorder_chipsets) &&
12410 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12411 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12413 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12414 &tp->pci_cacheline_sz);
12415 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12416 &tp->pci_lat_timer);
12417 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12418 tp->pci_lat_timer < 64) {
12419 tp->pci_lat_timer = 64;
12420 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12421 tp->pci_lat_timer);
12424 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12425 /* 5700 BX chips need to have their TX producer index
12426 * mailboxes written twice to work around a bug.
12428 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12430 /* If we are in PCI-X mode, enable register write workaround.
12432 * The workaround is to use indirect register accesses
12433 * for all chip writes not to mailbox registers.
12435 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12438 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12440 /* The chip can have its power management PCI config
12441 * space registers clobbered due to this bug.
12442 * So explicitly force the chip into D0 here.
12444 pci_read_config_dword(tp->pdev,
12445 tp->pm_cap + PCI_PM_CTRL,
12447 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12448 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12449 pci_write_config_dword(tp->pdev,
12450 tp->pm_cap + PCI_PM_CTRL,
12453 /* Also, force SERR#/PERR# in PCI command. */
12454 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12455 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12456 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12460 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12461 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12462 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12463 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12465 /* Chip-specific fixup from Broadcom driver */
12466 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12467 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12468 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12469 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12472 /* Default fast path register access methods */
12473 tp->read32 = tg3_read32;
12474 tp->write32 = tg3_write32;
12475 tp->read32_mbox = tg3_read32;
12476 tp->write32_mbox = tg3_write32;
12477 tp->write32_tx_mbox = tg3_write32;
12478 tp->write32_rx_mbox = tg3_write32;
12480 /* Various workaround register access methods */
12481 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12482 tp->write32 = tg3_write_indirect_reg32;
12483 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12484 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12485 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12487 * Back-to-back register writes can cause problems on these
12488 * chips; the workaround is to read back all register writes
12489 * except those to mailbox registers.
12491 * See tg3_write_indirect_reg32().
12493 tp->write32 = tg3_write_flush_reg32;
12497 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12498 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12499 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12500 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12501 tp->write32_rx_mbox = tg3_write_flush_reg32;
12504 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12505 tp->read32 = tg3_read_indirect_reg32;
12506 tp->write32 = tg3_write_indirect_reg32;
12507 tp->read32_mbox = tg3_read_indirect_mbox;
12508 tp->write32_mbox = tg3_write_indirect_mbox;
12509 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12510 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12515 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12516 pci_cmd &= ~PCI_COMMAND_MEMORY;
12517 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12520 tp->read32_mbox = tg3_read32_mbox_5906;
12521 tp->write32_mbox = tg3_write32_mbox_5906;
12522 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12523 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12526 if (tp->write32 == tg3_write_indirect_reg32 ||
12527 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12528 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12529 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12530 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12532 /* Get eeprom hw config before calling tg3_set_power_state().
12533 * In particular, the TG3_FLG2_IS_NIC flag must be
12534 * determined before calling tg3_set_power_state() so that
12535 * we know whether or not to switch out of Vaux power.
12536 * When the flag is set, it means that GPIO1 is used for eeprom
12537 * write protect and also implies that it is a LOM where GPIOs
12538 * are not used to switch power.
12540 tg3_get_eeprom_hw_cfg(tp);
12542 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12543 /* Allow reads and writes to the
12544 * APE register and memory space.
12546 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12547 PCISTATE_ALLOW_APE_SHMEM_WR;
12548 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12552 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12556 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12558 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12559 * GPIO1 driven high will bring 5700's external PHY out of reset.
12560 * It is also used as eeprom write protect on LOMs.
12562 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12563 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12564 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12565 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12566 GRC_LCLCTRL_GPIO_OUTPUT1);
12567 /* Unused GPIO3 must be driven as output on 5752 because there
12568 * are no pull-up resistors on unused GPIO pins.
12570 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12571 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12575 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12577 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
12578 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
12579 /* Turn off the debug UART. */
12580 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12581 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12582 /* Keep VMain power. */
12583 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12584 GRC_LCLCTRL_GPIO_OUTPUT0;
12587 /* Force the chip into D0. */
12588 err = tg3_set_power_state(tp, PCI_D0);
12590 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12591 pci_name(tp->pdev));
12595 /* Derive initial jumbo mode from MTU assigned in
12596 * ether_setup() via the alloc_etherdev() call
12598 if (tp->dev->mtu > ETH_DATA_LEN &&
12599 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12600 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12602 /* Determine WakeOnLan speed to use. */
12603 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12604 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12605 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12606 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12607 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12609 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12613 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
12615 /* A few boards don't want Ethernet@WireSpeed phy feature */
12616 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12617 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12618 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12619 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12620 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
12621 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12622 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12624 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12625 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12626 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12627 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12628 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12630 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12631 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
12632 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12633 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12638 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12639 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12640 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12641 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12642 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12644 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12647 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12648 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12649 tp->phy_otp = tg3_read_otp_phycfg(tp);
12650 if (tp->phy_otp == 0)
12651 tp->phy_otp = TG3_OTP_DEFAULT;
12654 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12655 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12657 tp->mi_mode = MAC_MI_MODE_BASE;
12659 tp->coalesce_mode = 0;
12660 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12661 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12662 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12665 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12666 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12668 if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12669 tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12670 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12671 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12673 err = tg3_mdio_init(tp);
12677 /* Initialize data/descriptor byte/word swapping. */
12678 val = tr32(GRC_MODE);
12679 val &= GRC_MODE_HOST_STACKUP;
12680 tw32(GRC_MODE, val | tp->grc_mode);
12682 tg3_switch_clocks(tp);
12684 /* Clear this out for sanity. */
12685 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12687 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12689 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12690 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12691 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12693 if (chiprevid == CHIPREV_ID_5701_A0 ||
12694 chiprevid == CHIPREV_ID_5701_B0 ||
12695 chiprevid == CHIPREV_ID_5701_B2 ||
12696 chiprevid == CHIPREV_ID_5701_B5) {
12697 void __iomem *sram_base;
12699 /* Write some dummy words into the SRAM status block
12700 * area and see if it reads back correctly. If the value
12701 * read back is bad, force-enable the PCI-X workaround.
12703 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12705 writel(0x00000000, sram_base);
12706 writel(0x00000000, sram_base + 4);
12707 writel(0xffffffff, sram_base + 4);
12708 if (readl(sram_base) != 0x00000000)
12709 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12714 tg3_nvram_init(tp);
12716 grc_misc_cfg = tr32(GRC_MISC_CFG);
12717 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12720 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12721 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12722 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12724 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12725 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12726 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12727 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12728 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12729 HOSTCC_MODE_CLRTICK_TXBD);
12731 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12732 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12733 tp->misc_host_ctrl);
12736 /* Preserve the APE MAC_MODE bits */
12737 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12738 tp->mac_mode = tr32(MAC_MODE) |
12739 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12741 tp->mac_mode = TG3_DEF_MAC_MODE;
12743 /* these are limited to 10/100 only */
12744 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12745 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12746 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12747 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12748 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12749 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12750 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12751 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12752 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12753 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12754 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12755 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12756 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
12757 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12759 err = tg3_phy_probe(tp);
12761 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12762 pci_name(tp->pdev), err);
12763 /* ... but do not return immediately ... */
12767 tg3_read_partno(tp);
12768 tg3_read_fw_ver(tp);
12770 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12771 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12774 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12776 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12779 /* 5700 {AX,BX} chips have a broken status block link
12780 * change bit implementation, so we must use the
12781 * status register in those cases.
12783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12784 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12786 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12788 /* The led_ctrl is set during tg3_phy_probe; here we might
12789 * have to force the link status polling mechanism based
12790 * upon subsystem IDs.
12792 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12794 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12795 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12796 TG3_FLAG_USE_LINKCHG_REG);
12799 /* For all SERDES we poll the MAC status register. */
12800 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12801 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12803 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12805 tp->rx_offset = NET_IP_ALIGN;
12806 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12807 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12810 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12812 /* Increment the rx prod index on the rx std ring by at most
12813 * 8 for these chips to work around hw errata.
12815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12816 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12817 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12818 tp->rx_std_max_post = 8;
12820 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12821 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12822 PCIE_PWR_MGMT_L1_THRESH_MSK;
12827 #ifdef CONFIG_SPARC
12828 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12830 struct net_device *dev = tp->dev;
12831 struct pci_dev *pdev = tp->pdev;
12832 struct device_node *dp = pci_device_to_OF_node(pdev);
12833 const unsigned char *addr;
12836 addr = of_get_property(dp, "local-mac-address", &len);
12837 if (addr && len == 6) {
12838 memcpy(dev->dev_addr, addr, 6);
12839 memcpy(dev->perm_addr, dev->dev_addr, 6);
12845 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12847 struct net_device *dev = tp->dev;
12849 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12850 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12855 static int __devinit tg3_get_device_address(struct tg3 *tp)
12857 struct net_device *dev = tp->dev;
12858 u32 hi, lo, mac_offset;
12861 #ifdef CONFIG_SPARC
12862 if (!tg3_get_macaddr_sparc(tp))
12867 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12868 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12869 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12871 if (tg3_nvram_lock(tp))
12872 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12874 tg3_nvram_unlock(tp);
12876 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12879 /* First try to get it from MAC address mailbox. */
12880 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
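/* A leading 0x484b (ASCII "HK") in the high word marks a valid
 * MAC address in the mailbox. */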
12881 if ((hi >> 16) == 0x484b) {
12882 dev->dev_addr[0] = (hi >> 8) & 0xff;
12883 dev->dev_addr[1] = (hi >> 0) & 0xff;
12885 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12886 dev->dev_addr[2] = (lo >> 24) & 0xff;
12887 dev->dev_addr[3] = (lo >> 16) & 0xff;
12888 dev->dev_addr[4] = (lo >> 8) & 0xff;
12889 dev->dev_addr[5] = (lo >> 0) & 0xff;
12891 /* Some old bootcode may report a 0 MAC address in SRAM */
12892 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12895 /* Next, try NVRAM. */
12896 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
12897 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
12898 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
12899 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
12900 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
12902 /* Finally just fetch it out of the MAC control regs. */
12904 hi = tr32(MAC_ADDR_0_HIGH);
12905 lo = tr32(MAC_ADDR_0_LOW);
12907 dev->dev_addr[5] = lo & 0xff;
12908 dev->dev_addr[4] = (lo >> 8) & 0xff;
12909 dev->dev_addr[3] = (lo >> 16) & 0xff;
12910 dev->dev_addr[2] = (lo >> 24) & 0xff;
12911 dev->dev_addr[1] = hi & 0xff;
12912 dev->dev_addr[0] = (hi >> 8) & 0xff;
12916 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12917 #ifdef CONFIG_SPARC
12918 if (!tg3_get_default_macaddr_sparc(tp))
12923 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12927 #define BOUNDARY_SINGLE_CACHELINE 1
12928 #define BOUNDARY_MULTI_CACHELINE 2
12930 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12932 int cacheline_size;
12936 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
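/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; an unprogrammed
 * value of zero is treated here as the 1024-byte maximum. */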
12938 cacheline_size = 1024;
12940 cacheline_size = (int) byte * 4;
12942 /* On 5703 and later chips, the boundary bits have no effect.
12945 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12946 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12947 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12950 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12951 goal = BOUNDARY_MULTI_CACHELINE;
12953 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12954 goal = BOUNDARY_SINGLE_CACHELINE;
12963 /* PCI controllers on most RISC systems tend to disconnect
12964 * when a device tries to burst across a cache-line boundary.
12965 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12967 * Unfortunately, for PCI-E there are only limited
12968 * write-side controls for this, and thus for reads
12969 * we will still get the disconnects. We'll also waste
12970 * these PCI cycles for both read and write for chips
12971 * other than 5700 and 5701 which do not implement the boundary bits.
12974 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12975 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12976 switch (cacheline_size) {
12981 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12982 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12983 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12985 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12986 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12991 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12992 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12996 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12997 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13000 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13001 switch (cacheline_size) {
13005 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13006 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13007 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13013 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13014 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13018 switch (cacheline_size) {
13020 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13021 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13022 DMA_RWCTRL_WRITE_BNDRY_16);
13027 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13028 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13029 DMA_RWCTRL_WRITE_BNDRY_32);
13034 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13035 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13036 DMA_RWCTRL_WRITE_BNDRY_64);
13041 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13042 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13043 DMA_RWCTRL_WRITE_BNDRY_128);
13048 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13049 DMA_RWCTRL_WRITE_BNDRY_256);
13052 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13053 DMA_RWCTRL_WRITE_BNDRY_512);
13057 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13058 DMA_RWCTRL_WRITE_BNDRY_1024);
13067 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13069 struct tg3_internal_buffer_desc test_desc;
13070 u32 sram_dma_descs;
13073 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13075 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13076 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13077 tw32(RDMAC_STATUS, 0);
13078 tw32(WDMAC_STATUS, 0);
13080 tw32(BUFMGR_MODE, 0);
13081 tw32(FTQ_RESET, 0);
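/* Build a single internal buffer descriptor that points at the host test buffer. */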
13083 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13084 test_desc.addr_lo = buf_dma & 0xffffffff;
13085 test_desc.nic_mbuf = 0x00002100;
13086 test_desc.len = size;
13089 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
13090 * the *second* time the tg3 driver was getting loaded after an
13093 * Broadcom tells me:
13094 * ...the DMA engine is connected to the GRC block and a DMA
13095 * reset may affect the GRC block in some unpredictable way...
13096 * The behavior of resets to individual blocks has not been tested.
13098 * Broadcom noted the GRC reset will also reset all sub-components.
13101 test_desc.cqid_sqid = (13 << 8) | 2;
13103 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13106 test_desc.cqid_sqid = (16 << 8) | 7;
13108 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13111 test_desc.flags = 0x00000005;
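/* Copy the descriptor into NIC SRAM one word at a time through the
 * PCI memory window registers in config space. */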
13113 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13116 val = *(((u32 *)&test_desc) + i);
13117 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13118 sram_dma_descs + (i * sizeof(u32)));
13119 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13121 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
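/* Start the transfer by enqueueing the descriptor: the read-DMA FIFO
 * moves data from host memory to the device, the write-DMA FIFO moves
 * it back to the host. */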
13124 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13126 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13130 for (i = 0; i < 40; i++) {
13134 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13136 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13137 if ((val & 0xffff) == sram_dma_descs) {
13148 #define TEST_BUFFER_SIZE 0x2000
13150 static int __devinit tg3_test_dma(struct tg3 *tp)
13152 dma_addr_t buf_dma;
13153 u32 *buf, saved_dma_rwctrl;
13156 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13162 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13163 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13165 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13167 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13168 /* DMA read watermark not used on PCIE */
13169 tp->dma_rwctrl |= 0x00180000;
13170 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13171 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13173 tp->dma_rwctrl |= 0x003f0000;
13175 tp->dma_rwctrl |= 0x003f000f;
13177 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13178 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13179 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13180 u32 read_water = 0x7;
13182 /* If the 5704 is behind the EPB bridge, we can
13183 * do the less restrictive ONE_DMA workaround for
13184 * better performance.
13186 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13187 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13188 tp->dma_rwctrl |= 0x8000;
13189 else if (ccval == 0x6 || ccval == 0x7)
13190 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13194 /* Set bit 23 to enable PCIX hw bug fix */
13196 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13197 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13199 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13200 /* 5780 always in PCIX mode */
13201 tp->dma_rwctrl |= 0x00144000;
13202 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13203 /* 5714 always in PCIX mode */
13204 tp->dma_rwctrl |= 0x00148000;
13206 tp->dma_rwctrl |= 0x001b000f;
13210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13211 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13212 tp->dma_rwctrl &= 0xfffffff0;
13214 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13216 /* Remove this if it causes problems for some boards. */
13217 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13219 /* On 5700/5701 chips, we need to set this bit.
13220 * Otherwise the chip will issue cacheline transactions
13221 * to streamable DMA memory without all of the byte
13222 * enables turned on. This is an error on several
13223 * RISC PCI controllers, in particular sparc64.
13225 * On 5703/5704 chips, this bit has been reassigned
13226 * a different meaning. In particular, it is used
13227 * on those chips to enable a PCI-X workaround.
13229 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13232 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13235 /* Unneeded, already done by tg3_get_invariants. */
13236 tg3_switch_clocks(tp);
13240 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13241 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13244 /* It is best to perform the DMA test with the maximum write burst size
13245 * to expose the 5700/5701 write DMA bug.
13247 saved_dma_rwctrl = tp->dma_rwctrl;
13248 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13249 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
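/* Fill the host buffer with a known pattern before handing it to the card. */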
13254 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13257 /* Send the buffer to the chip. */
13258 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13260 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
13265 /* Validate that the data reached the card's RAM correctly. */
13266 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13268 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13269 if (le32_to_cpu(val) != p[i]) {
13270 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13271 /* ret = -ENODEV here? */
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
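/* Broadly speaking, the mbuf watermarks above bound how much on-chip mbuf
 * memory the read-DMA engine and receive MAC may consume before the buffer
 * manager starts throttling them; separate sets exist for standard and jumbo
 * frames, with reduced values for 5705-class parts and the 5906.
 */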
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & PHY_ID_MASK) {
	case PHY_ID_BCM5400:	return "5400";
	case PHY_ID_BCM5401:	return "5401";
	case PHY_ID_BCM5411:	return "5411";
	case PHY_ID_BCM5701:	return "5701";
	case PHY_ID_BCM5703:	return "5703";
	case PHY_ID_BCM5704:	return "5704";
	case PHY_ID_BCM5705:	return "5705";
	case PHY_ID_BCM5750:	return "5750";
	case PHY_ID_BCM5752:	return "5752";
	case PHY_ID_BCM5714:	return "5714";
	case PHY_ID_BCM5780:	return "5780";
	case PHY_ID_BCM5755:	return "5755";
	case PHY_ID_BCM5787:	return "5787";
	case PHY_ID_BCM5784:	return "5784";
	case PHY_ID_BCM5756:	return "5722/5756";
	case PHY_ID_BCM5906:	return "5906";
	case PHY_ID_BCM5761:	return "5761";
	case PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
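/* tg3_bus_string() builds its result with strcpy()/strcat() into a buffer
 * supplied by the caller; the longest string produced here (on the order of
 * "PCIX:133MHz:64-bit") is short, so a modest stack buffer in the probe-time
 * caller suffices.
 */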
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
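/* PCI devfn encodes the device number in bits 7:3 and the function number in
 * bits 2:0, so "devfn & ~7" is function 0 of this slot and "devnr | func"
 * walks all eight functions.  For example, a NIC at devfn 0x09 (device 1,
 * function 1) scans devfn 0x08 through 0x0f looking for its sibling port.
 */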
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
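/* The two ops tables above differ only in ndo_start_xmit: chips that are
 * neither 5755-class nor 5906 are given tg3_netdev_ops_dma_bug in
 * tg3_init_one() below, so their transmits go through the
 * tg3_start_xmit_dma_bug() workaround path.
 */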
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;
		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
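	/* In single-vector (non-MSIX) mode only tp->napi[0] is used and the
	 * loop above exits after programming the default interrupt, receive
	 * return and send mailboxes; with MSIX/RSS each additional vector
	 * gets its own mailbox set derived from the same running offsets.
	 */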
	netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}
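	/* Example of the policy above: a 40-bit limited chip on a
	 * CONFIG_HIGHMEM kernel advertises a 64-bit streaming mask (with the
	 * address check done at transmit time) while keeping the 40-bit mask
	 * for coherent allocations; 5788-class parts stay at 32 bits.
	 */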
	tg3_init_bufmgr_config(tp);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
		   (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}
	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		if (dev->features & NETIF_F_IP_CSUM)
			dev->features |= NETIF_F_TSO;
		if ((dev->features & NETIF_F_IPV6_CSUM) &&
		    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			dev->features |= NETIF_F_TSO_ECN;
	}
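	/* On firmware-TSO parts the feature is left disabled here; it can be
	 * toggled at runtime with ethtool, e.g. "ethtool -K eth0 tso on"
	 * (interface name illustrative).
	 */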
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
	}
	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down
	 * cleanly.  The DMA self test below will enable WDMAC and we
	 * would otherwise see (spurious) pending DMA on the PCI bus at
	 * that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}
	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_bus_string(tp, str),
	       dev->dev_addr);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		printk(KERN_INFO
		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
		       tp->dev->name,
		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
	else
		printk(KERN_INFO
		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
		       tp->dev->name, tg3_phy_string(tp),
		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
			 "10/100/1000Base-T")),
		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
	       (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));

	return 0;
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

	if (tp->fw)
		release_firmware(tp->fw);

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
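/* tg3_resume() mirrors the suspend path above: restore PCI config space,
 * bring the chip back to D0, restart the hardware and re-arm the periodic
 * timer before handing the device back to the stack.
 */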
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
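/* Typical usage (illustrative): the driver binds automatically to supported
 * Tigon3 devices once loaded, e.g. via "modprobe tg3"; message verbosity can
 * be raised with the tg3_debug knob consumed in tg3_init_one().
 */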