/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
42 #include <net/checksum.h>
44 #include <asm/system.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
/* Compile-time feature switches: VLAN tag acceleration and TSO support.
 * NOTE(review): the #else/#endif lines of these conditionals are not
 * visible in this excerpt — confirm against the full file.
 */
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
58 #define TG3_VLAN_TAG_USED 0
62 #define TG3_TSO_SUPPORT 1
64 #define TG3_TSO_SUPPORT 0
/* Driver identification strings and default MAC/RX/TX mode register values. */
69 #define DRV_MODULE_NAME "tg3"
70 #define PFX DRV_MODULE_NAME ": "
71 #define DRV_MODULE_VERSION "3.64"
72 #define DRV_MODULE_RELDATE "July 31, 2006"
74 #define TG3_DEF_MAC_MODE 0
75 #define TG3_DEF_RX_MODE 0
76 #define TG3_DEF_TX_MODE 0
77 #define TG3_DEF_MSG_ENABLE \
87 /* length of time before we decide the hardware is borked,
88 * and dev->tx_timeout() should be called to fix the problem
90 #define TG3_TX_TIMEOUT (5 * HZ)
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU 60
94 #define TG3_MAX_MTU(tp) \
95 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98 * You can't change the ring sizes, but you can change where you place
99 * them in the NIC onboard memory.
101 #define TG3_RX_RING_SIZE 512
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JUMBO_RING_SIZE 256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
106 /* Do not place this n-ring entries value into the tp struct itself,
107 * we really want to expose these constants to GCC so that modulo et
108 * al. operations are done with shifts and masks instead of with
109 * hw multiply/modulo instructions. Another solution would be to
110 * replace things like '% foo' with '& (foo - 1)'.
112 #define TG3_RX_RCB_RING_SIZE(tp) \
113 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115 #define TG3_TX_RING_SIZE 512
116 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123 TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 #define TX_BUFFS_AVAIL(TP) \
127 ((TP)->tx_pending - \
128 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
129 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
/* Packet buffer sizes: 1536/9046 payload plus rx_offset and 64 bytes slack. */
131 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
132 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134 /* minimum number of free TX descriptors required to wake up TX process */
135 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137 /* number of ETHTOOL_GSTATS u64's */
138 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140 #define TG3_NUM_TEST 6
/* Version banner printed once at probe time, plus module metadata. */
142 static char version[] __devinitdata =
143 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
146 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
147 MODULE_LICENSE("GPL");
148 MODULE_VERSION(DRV_MODULE_VERSION);
/* Debug message bitmask module parameter; -1 selects TG3_DEF_MSG_ENABLE. */
150 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
151 module_param(tg3_debug, int, 0);
152 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value")
/* PCI device-ID match table: every Tigon3 variant this driver binds to,
 * matched on vendor/device only (any subsystem IDs).
 */
154 static struct pci_device_id tg3_pci_tbl[] = {
155 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
164 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
166 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
168 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
170 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
174 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
176 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
178 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
182 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
184 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
186 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
188 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
190 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
192 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
194 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
200 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
202 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
204 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
214 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
224 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
226 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
228 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
230 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
232 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
234 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
236 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
240 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
242 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
243 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
244 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
245 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
246 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
247 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
248 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
249 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
250 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
251 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
252 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
253 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
254 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
255 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
256 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
257 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
258 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
259 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
260 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
261 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
262 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
263 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
264 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
/* NOTE(review): the terminating { 0, } sentinel and closing brace are not
 * visible in this excerpt — confirm they are present in the full file.
 */
268 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* ETHTOOL_GSTRINGS names for ETHTOOL_GSTATS.  The order here must match the
 * field order of struct tg3_ethtool_stats (TG3_NUM_STATS u64 counters).
 * NOTE(review): the "static struct {" opener is not visible in this excerpt.
 */
271 const char string[ETH_GSTRING_LEN];
272 } ethtool_stats_keys[TG3_NUM_STATS] = {
275 { "rx_ucast_packets" },
276 { "rx_mcast_packets" },
277 { "rx_bcast_packets" },
279 { "rx_align_errors" },
280 { "rx_xon_pause_rcvd" },
281 { "rx_xoff_pause_rcvd" },
282 { "rx_mac_ctrl_rcvd" },
283 { "rx_xoff_entered" },
284 { "rx_frame_too_long_errors" },
286 { "rx_undersize_packets" },
287 { "rx_in_length_errors" },
288 { "rx_out_length_errors" },
289 { "rx_64_or_less_octet_packets" },
290 { "rx_65_to_127_octet_packets" },
291 { "rx_128_to_255_octet_packets" },
292 { "rx_256_to_511_octet_packets" },
293 { "rx_512_to_1023_octet_packets" },
294 { "rx_1024_to_1522_octet_packets" },
295 { "rx_1523_to_2047_octet_packets" },
296 { "rx_2048_to_4095_octet_packets" },
297 { "rx_4096_to_8191_octet_packets" },
298 { "rx_8192_to_9022_octet_packets" },
305 { "tx_flow_control" },
307 { "tx_single_collisions" },
308 { "tx_mult_collisions" },
310 { "tx_excessive_collisions" },
311 { "tx_late_collisions" },
312 { "tx_collide_2times" },
313 { "tx_collide_3times" },
314 { "tx_collide_4times" },
315 { "tx_collide_5times" },
316 { "tx_collide_6times" },
317 { "tx_collide_7times" },
318 { "tx_collide_8times" },
319 { "tx_collide_9times" },
320 { "tx_collide_10times" },
321 { "tx_collide_11times" },
322 { "tx_collide_12times" },
323 { "tx_collide_13times" },
324 { "tx_collide_14times" },
325 { "tx_collide_15times" },
326 { "tx_ucast_packets" },
327 { "tx_mcast_packets" },
328 { "tx_bcast_packets" },
329 { "tx_carrier_sense_errors" },
333 { "dma_writeq_full" },
334 { "dma_write_prioq_full" },
338 { "rx_threshold_hit" },
340 { "dma_readq_full" },
341 { "dma_read_prioq_full" },
342 { "tx_comp_queue_full" },
344 { "ring_set_send_prod_index" },
345 { "ring_status_update" },
347 { "nic_avoided_irqs" },
348 { "nic_tx_threshold_hit" }
/* ETHTOOL_GSTRINGS names for the TG3_NUM_TEST self-tests, in the order the
 * ethtool self-test code fills in its results array.
 * NOTE(review): the "static struct {" opener is not visible in this excerpt.
 */
352 const char string[ETH_GSTRING_LEN];
353 } ethtool_test_keys[TG3_NUM_TEST] = {
354 { "nvram test (online) " },
355 { "link test (online) " },
356 { "register test (offline)" },
357 { "memory test (offline)" },
358 { "loopback test (offline)" },
359 { "interrupt test (offline)" },
362 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
364 writel(val, tp->regs + off);
367 static u32 tg3_read32(struct tg3 *tp, u32 off)
369 return (readl(tp->regs + off));
372 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
376 spin_lock_irqsave(&tp->indirect_lock, flags);
377 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
378 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
379 spin_unlock_irqrestore(&tp->indirect_lock, flags);
382 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
384 writel(val, tp->regs + off);
385 readl(tp->regs + off);
388 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
393 spin_lock_irqsave(&tp->indirect_lock, flags);
394 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
395 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396 spin_unlock_irqrestore(&tp->indirect_lock, flags);
400 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
404 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
405 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
406 TG3_64BIT_REG_LOW, val);
409 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
410 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
411 TG3_64BIT_REG_LOW, val);
415 spin_lock_irqsave(&tp->indirect_lock, flags);
416 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
417 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
418 spin_unlock_irqrestore(&tp->indirect_lock, flags);
420 /* In indirect mode when disabling interrupts, we also need
421 * to clear the interrupt bit in the GRC local ctrl register.
423 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
425 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
426 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
430 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
435 spin_lock_irqsave(&tp->indirect_lock, flags);
436 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
437 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
438 spin_unlock_irqrestore(&tp->indirect_lock, flags);
442 /* usec_wait specifies the wait time in usec when writing to certain registers
443 * where it is unsafe to read back the register without some delay.
444 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
445 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
447 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
449 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
450 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
451 /* Non-posted methods */
452 tp->write32(tp, off, val);
455 tg3_write32(tp, off, val);
460 /* Wait again after the read for the posted method to guarantee that
461 * the wait time is met.
467 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
469 tp->write32_mbox(tp, off, val);
470 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
471 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
472 tp->read32_mbox(tp, off);
475 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
477 void __iomem *mbox = tp->regs + off;
479 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
481 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Register/mailbox access shorthands.  All of these expand to calls through
 * the per-chip method pointers and require a local variable named `tp`
 * (struct tg3 *) to be in scope at the expansion site.
 */
485 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
486 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
487 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
488 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
489 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
491 #define tw32(reg,val) tp->write32(tp, reg, val)
492 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
493 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
494 #define tr32(reg) tp->read32(tp, reg)
496 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
500 spin_lock_irqsave(&tp->indirect_lock, flags);
501 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
502 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
503 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
505 /* Always leave this as zero. */
506 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
508 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
509 tw32_f(TG3PCI_MEM_WIN_DATA, val);
511 /* Always leave this as zero. */
512 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
514 spin_unlock_irqrestore(&tp->indirect_lock, flags);
517 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
521 spin_lock_irqsave(&tp->indirect_lock, flags);
522 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
523 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
524 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
526 /* Always leave this as zero. */
527 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
529 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
530 *val = tr32(TG3PCI_MEM_WIN_DATA);
532 /* Always leave this as zero. */
533 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
535 spin_unlock_irqrestore(&tp->indirect_lock, flags);
538 static void tg3_disable_ints(struct tg3 *tp)
540 tw32(TG3PCI_MISC_HOST_CTRL,
541 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
542 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
545 static inline void tg3_cond_int(struct tg3 *tp)
547 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
548 (tp->hw_status->status & SD_STATUS_UPDATED))
549 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
552 static void tg3_enable_ints(struct tg3 *tp)
557 tw32(TG3PCI_MISC_HOST_CTRL,
558 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
559 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
560 (tp->last_tag << 24));
561 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
562 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
563 (tp->last_tag << 24));
567 static inline unsigned int tg3_has_work(struct tg3 *tp)
569 struct tg3_hw_status *sblk = tp->hw_status;
570 unsigned int work_exists = 0;
572 /* check for phy events */
573 if (!(tp->tg3_flags &
574 (TG3_FLAG_USE_LINKCHG_REG |
575 TG3_FLAG_POLL_SERDES))) {
576 if (sblk->status & SD_STATUS_LINK_CHG)
579 /* check for RX/TX work to do */
580 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
581 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
588 * similar to tg3_enable_ints, but it accurately determines whether there
589 * is new work pending and can return without flushing the PIO write
590 * which reenables interrupts
592 static void tg3_restart_ints(struct tg3 *tp)
594 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
598 /* When doing tagged status, this work check is unnecessary.
599 * The last_tag we write above tells the chip which piece of
600 * work we've completed.
602 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
604 tw32(HOSTCC_MODE, tp->coalesce_mode |
605 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
608 static inline void tg3_netif_stop(struct tg3 *tp)
610 tp->dev->trans_start = jiffies; /* prevent tx timeout */
611 netif_poll_disable(tp->dev);
612 netif_tx_disable(tp->dev);
615 static inline void tg3_netif_start(struct tg3 *tp)
617 netif_wake_queue(tp->dev);
618 /* NOTE: unconditional netif_wake_queue is only appropriate
619 * so long as all callers are assured to have free tx slots
620 * (such as after tg3_init_hw)
622 netif_poll_enable(tp->dev);
623 tp->hw_status->status |= SD_STATUS_UPDATED;
627 static void tg3_switch_clocks(struct tg3 *tp)
629 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
632 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
635 orig_clock_ctrl = clock_ctrl;
636 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
637 CLOCK_CTRL_CLKRUN_OENABLE |
639 tp->pci_clock_ctrl = clock_ctrl;
641 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
642 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
643 tw32_wait_f(TG3PCI_CLOCK_CTRL,
644 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
646 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
647 tw32_wait_f(TG3PCI_CLOCK_CTRL,
649 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
651 tw32_wait_f(TG3PCI_CLOCK_CTRL,
652 clock_ctrl | (CLOCK_CTRL_ALTCLK),
655 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
658 #define PHY_BUSY_LOOPS 5000
660 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
666 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
668 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
674 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
675 MI_COM_PHY_ADDR_MASK);
676 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
677 MI_COM_REG_ADDR_MASK);
678 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
680 tw32_f(MAC_MI_COM, frame_val);
682 loops = PHY_BUSY_LOOPS;
685 frame_val = tr32(MAC_MI_COM);
687 if ((frame_val & MI_COM_BUSY) == 0) {
689 frame_val = tr32(MAC_MI_COM);
697 *val = frame_val & MI_COM_DATA_MASK;
701 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
702 tw32_f(MAC_MI_MODE, tp->mi_mode);
709 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
715 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
721 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
722 MI_COM_PHY_ADDR_MASK);
723 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
724 MI_COM_REG_ADDR_MASK);
725 frame_val |= (val & MI_COM_DATA_MASK);
726 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
728 tw32_f(MAC_MI_COM, frame_val);
730 loops = PHY_BUSY_LOOPS;
733 frame_val = tr32(MAC_MI_COM);
734 if ((frame_val & MI_COM_BUSY) == 0) {
736 frame_val = tr32(MAC_MI_COM);
746 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
747 tw32_f(MAC_MI_MODE, tp->mi_mode);
754 static void tg3_phy_set_wirespeed(struct tg3 *tp)
758 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
761 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
762 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
763 tg3_writephy(tp, MII_TG3_AUX_CTRL,
764 (val | (1 << 15) | (1 << 4)));
767 static int tg3_bmcr_reset(struct tg3 *tp)
772 /* OK, reset it, and poll the BMCR_RESET bit until it
773 * clears or we time out.
775 phy_control = BMCR_RESET;
776 err = tg3_writephy(tp, MII_BMCR, phy_control);
782 err = tg3_readphy(tp, MII_BMCR, &phy_control);
786 if ((phy_control & BMCR_RESET) == 0) {
798 static int tg3_wait_macro_done(struct tg3 *tp)
805 if (!tg3_readphy(tp, 0x16, &tmp32)) {
806 if ((tmp32 & 0x1000) == 0)
816 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
818 static const u32 test_pat[4][6] = {
819 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
820 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
821 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
822 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
826 for (chan = 0; chan < 4; chan++) {
829 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
830 (chan * 0x2000) | 0x0200);
831 tg3_writephy(tp, 0x16, 0x0002);
833 for (i = 0; i < 6; i++)
834 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
837 tg3_writephy(tp, 0x16, 0x0202);
838 if (tg3_wait_macro_done(tp)) {
843 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
844 (chan * 0x2000) | 0x0200);
845 tg3_writephy(tp, 0x16, 0x0082);
846 if (tg3_wait_macro_done(tp)) {
851 tg3_writephy(tp, 0x16, 0x0802);
852 if (tg3_wait_macro_done(tp)) {
857 for (i = 0; i < 6; i += 2) {
860 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
861 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
862 tg3_wait_macro_done(tp)) {
868 if (low != test_pat[chan][i] ||
869 high != test_pat[chan][i+1]) {
870 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
871 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
872 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
882 static int tg3_phy_reset_chanpat(struct tg3 *tp)
886 for (chan = 0; chan < 4; chan++) {
889 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
890 (chan * 0x2000) | 0x0200);
891 tg3_writephy(tp, 0x16, 0x0002);
892 for (i = 0; i < 6; i++)
893 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
894 tg3_writephy(tp, 0x16, 0x0202);
895 if (tg3_wait_macro_done(tp))
902 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
904 u32 reg32, phy9_orig;
905 int retries, do_phy_reset, err;
911 err = tg3_bmcr_reset(tp);
917 /* Disable transmitter and interrupt. */
918 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
922 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
924 /* Set full-duplex, 1000 mbps. */
925 tg3_writephy(tp, MII_BMCR,
926 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
928 /* Set to master mode. */
929 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
932 tg3_writephy(tp, MII_TG3_CTRL,
933 (MII_TG3_CTRL_AS_MASTER |
934 MII_TG3_CTRL_ENABLE_AS_MASTER));
936 /* Enable SM_DSP_CLOCK and 6dB. */
937 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
939 /* Block the PHY control access. */
940 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
941 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
943 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
948 err = tg3_phy_reset_chanpat(tp);
952 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
953 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
955 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
956 tg3_writephy(tp, 0x16, 0x0000);
958 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
959 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
960 /* Set Extended packet length bit for jumbo frames */
961 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
964 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
967 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
969 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
971 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
/* Forward declaration: link-state reporting, defined later in the file. */
978 static void tg3_link_report(struct tg3 *);
980 /* This will reset the tigon3 PHY if there is no valid
981 * link unless the FORCE argument is non-zero.
983 static int tg3_phy_reset(struct tg3 *tp)
/* Read BMSR twice — the first read returns latched link status. */
988 err = tg3_readphy(tp, MII_BMSR, &phy_status);
989 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
/* Drop carrier and report the link change before resetting the PHY. */
993 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
994 netif_carrier_off(tp->dev);
/* 5703/5704/5705 need the DSP test-pattern reset workaround. */
998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1001 err = tg3_phy_reset_5703_4_5(tp)
1007 err = tg3_bmcr_reset(tp);
/* Per-PHY-errata DSP register fixups follow; the magic constants are
 * opaque vendor workaround values — do not alter them.
 */
1012 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1013 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1014 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1015 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1016 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1017 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1018 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1020 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1021 tg3_writephy(tp, 0x1c, 0x8d68);
1022 tg3_writephy(tp, 0x1c, 0x8d68);
1024 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1025 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1026 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1027 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1028 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1029 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1030 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1031 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1032 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1034 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1035 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1036 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1037 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1038 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1040 /* Set Extended packet length bit (bit 14) on all chips that */
1041 /* support jumbo frames */
1042 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1043 /* Cannot do read-modify-write on 5401 */
1044 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1045 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1048 /* Set bit 14 with read-modify-write to preserve other bits */
1049 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1050 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1051 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1054 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1055 * jumbo frames transmission.
1057 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1060 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1061 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1062 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1065 tg3_phy_set_wirespeed(tp);
/* Configure the GRC GPIO lines that control auxiliary (Vaux) power.  On
 * dual-port 5704/5714 boards the two functions share the power circuitry,
 * so the peer device's WOL/ASF state is consulted before switching; each
 * GPIO write uses a 100 usec settle delay (GRC_LOCAL_CTRL must not be read
 * back immediately after toggling power GPIOs).
 */
1069 static void tg3_frob_aux_power(struct tg3 *tp)
1071 struct tg3 *tp_peer = tp;
/* Boards with EEPROM write protect flag set do not frob aux power here. */
1073 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1076 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1077 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1078 struct net_device *dev_peer;
1080 dev_peer = pci_get_drvdata(tp->pdev_peer);
1081 /* remove_one() may have been run on the peer. */
1085 tp_peer = netdev_priv(dev_peer);
/* Aux power must stay up if either function has WOL or ASF enabled. */
1088 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1089 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1090 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1091 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1092 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1094 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1095 (GRC_LCLCTRL_GPIO_OE0 |
1096 GRC_LCLCTRL_GPIO_OE1 |
1097 GRC_LCLCTRL_GPIO_OE2 |
1098 GRC_LCLCTRL_GPIO_OUTPUT0 |
1099 GRC_LCLCTRL_GPIO_OUTPUT1),
1103 u32 grc_local_ctrl = 0;
1105 if (tp_peer != tp &&
1106 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1109 /* Workaround to prevent overdrawing Amps. */
1110 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1112 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1113 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1114 grc_local_ctrl, 100);
1117 /* On 5753 and variants, GPIO2 cannot be used. */
1118 no_gpio2 = tp->nic_sram_data_cfg &
1119 NIC_SRAM_DATA_CFG_NO_GPIO2;
1121 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1122 GRC_LCLCTRL_GPIO_OE1 |
1123 GRC_LCLCTRL_GPIO_OE2 |
1124 GRC_LCLCTRL_GPIO_OUTPUT1 |
1125 GRC_LCLCTRL_GPIO_OUTPUT2;
1127 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1128 GRC_LCLCTRL_GPIO_OUTPUT2);
/* Three-step GPIO sequence: assert OE/outputs, raise OUTPUT0, then
 * drop OUTPUT2 (when GPIO2 is usable), 100 usec apart.
 */
1130 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131 grc_local_ctrl, 100);
1133 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1135 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1136 grc_local_ctrl, 100);
1139 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1140 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1141 grc_local_ctrl, 100);
/* No WOL/ASF anywhere: power down via GPIO1 (not on 5700/5701). */
1145 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1146 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1147 if (tp_peer != tp &&
1148 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1151 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1152 (GRC_LCLCTRL_GPIO_OE1 |
1153 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1155 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1156 GRC_LCLCTRL_GPIO_OE1, 100);
1158 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1159 (GRC_LCLCTRL_GPIO_OE1 |
1160 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
/* Forward declarations and reset-kind codes used by the power-management
 * and reset paths defined later in the file.
 */
1165 static int tg3_setup_phy(struct tg3 *, int);
1167 #define RESET_KIND_SHUTDOWN 0
1168 #define RESET_KIND_INIT 1
1169 #define RESET_KIND_SUSPEND 2
1171 static void tg3_write_sig_post_reset(struct tg3 *, int);
1172 static int tg3_halt_cpu(struct tg3 *, u32);
1173 static int tg3_nvram_lock(struct tg3 *);
1174 static void tg3_nvram_unlock(struct tg3 *);
1176 static void tg3_power_down_phy(struct tg3 *tp)
1178 /* The PHY should not be powered down on some chips because
1181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1183 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1184 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1186 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* Transition the device into the requested PCI power state.
 *
 * Visible sequence: restore indirect register access via
 * TG3PCI_MISC_HOST_CTRL, program the PCI PM control register, optionally
 * demote the link to 10/HALF for low-power operation, arm Wake-on-LAN
 * (magic packet) if enabled, gate RX/TX clocks per chip family, power
 * down the PHY when neither WOL nor ASF needs it, hand GPIO aux power to
 * tg3_frob_aux_power(), apply a 5750 AX/BX PLL workaround, halt the RX
 * CPU when ASF is off, and finally write the new power state.
 *
 * NOTE(review): this extract omits many intermediate lines (error paths,
 * else-arms, waits); comments below describe only the visible code.
 */
1189 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1192 u16 power_control, power_caps;
1193 int pm = tp->pm_cap;
1195 /* Make sure register accesses (indirect or otherwise)
1196 * will function correctly.
1198 pci_write_config_dword(tp->pdev,
1199 TG3PCI_MISC_HOST_CTRL,
1200 tp->misc_host_ctrl);
1202 pci_read_config_word(tp->pdev,
/* Clear any pending PME status (write-1-to-clear) and the state field. */
1205 power_control |= PCI_PM_CTRL_PME_STATUS;
1206 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1210 pci_write_config_word(tp->pdev,
1213 udelay(100); /* Delay after power state change */
1215 /* Switch out of Vaux if it is not a LOM */
1216 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1217 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1234 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1236 tp->dev->name, state);
/* Entering a sleep state: enable PME and mask PCI interrupts. */
1240 power_control |= PCI_PM_CTRL_PME_ENABLE;
1242 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1243 tw32(TG3PCI_MISC_HOST_CTRL,
1244 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
/* Save the current link config so it can be restored on resume. */
1246 if (tp->link_config.phy_is_low_power == 0) {
1247 tp->link_config.phy_is_low_power = 1;
1248 tp->link_config.orig_speed = tp->link_config.speed;
1249 tp->link_config.orig_duplex = tp->link_config.duplex;
1250 tp->link_config.orig_autoneg = tp->link_config.autoneg;
/* Copper only: drop to 10/HALF autoneg to minimize power draw. */
1253 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1254 tp->link_config.speed = SPEED_10;
1255 tp->link_config.duplex = DUPLEX_HALF;
1256 tp->link_config.autoneg = AUTONEG_ENABLE;
1257 tg3_setup_phy(tp, 0);
1260 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
/* Poll the firmware mailbox for the ASF-status handshake. */
1264 for (i = 0; i < 200; i++) {
1265 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1266 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
/* Tell firmware we are shutting down with WOL/magic packet armed. */
1271 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1272 WOL_DRV_STATE_SHUTDOWN |
1273 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1275 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1277 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1280 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1281 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1284 mac_mode = MAC_MODE_PORT_MODE_MII;
1286 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1287 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1288 mac_mode |= MAC_MODE_LINK_POLARITY;
/* Fiber/serdes path uses TBI port mode instead of MII. */
1290 mac_mode = MAC_MODE_PORT_MODE_TBI;
1293 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1294 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* Magic-packet wake only if the device can assert PME from D3cold. */
1296 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1297 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1298 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1300 tw32_f(MAC_MODE, mac_mode);
1303 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock gating: exact recipe depends on chip generation. */
1307 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1308 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1309 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1312 base_val = tp->pci_clock_ctrl;
1313 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1314 CLOCK_CTRL_TXCLK_DISABLE);
1316 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1317 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1318 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1320 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1321 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1322 u32 newbits1, newbits2;
1324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1325 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1326 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1327 CLOCK_CTRL_TXCLK_DISABLE |
1329 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1330 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1331 newbits1 = CLOCK_CTRL_625_CORE;
1332 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1334 newbits1 = CLOCK_CTRL_ALTCLK;
1335 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
/* Two-step clock switch; the wait arguments are elided here. */
1338 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1341 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1344 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1349 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1350 CLOCK_CTRL_TXCLK_DISABLE |
1351 CLOCK_CTRL_44MHZ_CORE);
1353 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1356 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1357 tp->pci_clock_ctrl | newbits3, 40);
/* Neither WOL nor ASF needs the PHY awake -> power it off. */
1361 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1362 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1363 /* Turn off the PHY */
1364 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1365 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1366 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1367 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1368 tg3_power_down_phy(tp);
/* Hand off GPIO-controlled auxiliary power (shared with peer port). */
1372 tg3_frob_aux_power(tp);
1374 /* Workaround for unstable PLL clock */
1375 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1376 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX) {
1377 u32 val = tr32(0x7d00);
1379 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1381 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
/* Stop the on-chip RX CPU under the NVRAM lock before sleeping. */
1384 err = tg3_nvram_lock(tp);
1385 tg3_halt_cpu(tp, RX_CPU_BASE);
1387 tg3_nvram_unlock(tp);
1391 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1393 /* Finally, set the new power state. */
1394 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1395 udelay(100); /* Delay after power state change */
/* Log the current link state: "down", or the negotiated speed/duplex
 * plus the resolved TX/RX flow-control settings (from tp->tg3_flags).
 */
1400 static void tg3_link_report(struct tg3 *tp)
1402 if (!netif_carrier_ok(tp->dev)) {
1403 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1405 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1407 (tp->link_config.active_speed == SPEED_1000 ?
1409 (tp->link_config.active_speed == SPEED_100 ?
1411 (tp->link_config.active_duplex == DUPLEX_FULL ?
1414 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1417 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1418 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
/* Resolve IEEE 802.3 pause (flow control) from our advertisement
 * (local_adv) and the link partner's (remote_adv), then program the MAC
 * RX/TX flow-control enables.  When PAUSE_AUTONEG is off, the flags
 * previously stored in tp->tg3_flags are used as-is.  Registers are only
 * rewritten when the computed mode actually changed.
 */
1422 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1424 u32 new_tg3_flags = 0;
1425 u32 old_rx_mode = tp->rx_mode;
1426 u32 old_tx_mode = tp->tx_mode;
1428 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1430 /* Convert 1000BaseX flow control bits to 1000BaseT
1431 * bits before resolving flow control.
1433 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1434 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1435 ADVERTISE_PAUSE_ASYM);
1436 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1438 if (local_adv & ADVERTISE_1000XPAUSE)
1439 local_adv |= ADVERTISE_PAUSE_CAP;
1440 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1441 local_adv |= ADVERTISE_PAUSE_ASYM;
1442 if (remote_adv & LPA_1000XPAUSE)
1443 remote_adv |= LPA_PAUSE_CAP;
1444 if (remote_adv & LPA_1000XPAUSE_ASYM)
1445 remote_adv |= LPA_PAUSE_ASYM;
/* Standard 802.3 Annex 28B pause resolution table. */
1448 if (local_adv & ADVERTISE_PAUSE_CAP) {
1449 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1450 if (remote_adv & LPA_PAUSE_CAP)
1452 (TG3_FLAG_RX_PAUSE |
1454 else if (remote_adv & LPA_PAUSE_ASYM)
1456 (TG3_FLAG_RX_PAUSE);
1458 if (remote_adv & LPA_PAUSE_CAP)
1460 (TG3_FLAG_RX_PAUSE |
1463 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1464 if ((remote_adv & LPA_PAUSE_CAP) &&
1465 (remote_adv & LPA_PAUSE_ASYM))
1466 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1469 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1470 tp->tg3_flags |= new_tg3_flags;
/* Autoneg of pause disabled: keep whatever flags are already set. */
1472 new_tg3_flags = tp->tg3_flags;
1475 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1476 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1478 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1480 if (old_rx_mode != tp->rx_mode) {
1481 tw32_f(MAC_RX_MODE, tp->rx_mode);
1484 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1485 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1487 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1489 if (old_tx_mode != tp->tx_mode) {
1490 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Decode the Broadcom PHY AUX status register's speed/duplex field into
 * *speed / *duplex.  Unrecognized values yield SPEED_INVALID /
 * DUPLEX_INVALID.  (The 10/100 *speed assignments fall on lines elided
 * from this extract.)
 */
1494 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1496 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1497 case MII_TG3_AUX_STAT_10HALF:
1499 *duplex = DUPLEX_HALF;
1502 case MII_TG3_AUX_STAT_10FULL:
1504 *duplex = DUPLEX_FULL;
1507 case MII_TG3_AUX_STAT_100HALF:
1509 *duplex = DUPLEX_HALF;
1512 case MII_TG3_AUX_STAT_100FULL:
1514 *duplex = DUPLEX_FULL;
1517 case MII_TG3_AUX_STAT_1000HALF:
1518 *speed = SPEED_1000;
1519 *duplex = DUPLEX_HALF;
1522 case MII_TG3_AUX_STAT_1000FULL:
1523 *speed = SPEED_1000;
1524 *duplex = DUPLEX_FULL;
/* default: report an invalid speed/duplex pair. */
1528 *speed = SPEED_INVALID;
1529 *duplex = DUPLEX_INVALID;
/* Program the copper PHY's advertisement registers and (re)start
 * link bring-up according to tp->link_config:
 *   - low-power mode: advertise only 10bT (plus 100bT if WOL_SPEED_100MB);
 *   - speed == SPEED_INVALID: advertise everything supported (minus
 *     gigabit on 10/100-only chips);
 *   - otherwise: advertise exactly the requested speed/duplex.
 * For forced (autoneg-off) modes it programs BMCR directly and waits for
 * the old link to drop; otherwise it restarts autonegotiation.
 * NOTE(review): several else-arms and waits are elided in this extract.
 */
1534 static void tg3_phy_copper_begin(struct tg3 *tp)
1539 if (tp->link_config.phy_is_low_power) {
1540 /* Entering low power mode. Disable gigabit and
1541 * 100baseT advertisements.
1543 tg3_writephy(tp, MII_TG3_CTRL, 0);
1545 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1546 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1547 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1548 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1550 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1551 } else if (tp->link_config.speed == SPEED_INVALID) {
1552 tp->link_config.advertising =
1553 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1554 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1555 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1556 ADVERTISED_Autoneg | ADVERTISED_MII);
1558 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1559 tp->link_config.advertising &=
1560 ~(ADVERTISED_1000baseT_Half |
1561 ADVERTISED_1000baseT_Full);
/* Translate ethtool ADVERTISED_* bits into MII ADVERTISE_* bits. */
1563 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1564 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1565 new_adv |= ADVERTISE_10HALF;
1566 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1567 new_adv |= ADVERTISE_10FULL;
1568 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1569 new_adv |= ADVERTISE_100HALF;
1570 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1571 new_adv |= ADVERTISE_100FULL;
1572 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1574 if (tp->link_config.advertising &
1575 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1577 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1578 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1579 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1580 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 must run gigabit as master (chip erratum). */
1581 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1582 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1583 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1584 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1585 MII_TG3_CTRL_ENABLE_AS_MASTER);
1586 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1588 tg3_writephy(tp, MII_TG3_CTRL, 0);
1591 /* Asking for a specific link mode. */
1592 if (tp->link_config.speed == SPEED_1000) {
1593 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1594 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1596 if (tp->link_config.duplex == DUPLEX_FULL)
1597 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1599 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1600 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1601 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1602 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1603 MII_TG3_CTRL_ENABLE_AS_MASTER);
1604 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1606 tg3_writephy(tp, MII_TG3_CTRL, 0);
1608 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1609 if (tp->link_config.speed == SPEED_100) {
1610 if (tp->link_config.duplex == DUPLEX_FULL)
1611 new_adv |= ADVERTISE_100FULL;
1613 new_adv |= ADVERTISE_100HALF;
1615 if (tp->link_config.duplex == DUPLEX_FULL)
1616 new_adv |= ADVERTISE_10FULL;
1618 new_adv |= ADVERTISE_10HALF;
1620 tg3_writephy(tp, MII_ADVERTISE, new_adv);
/* Forced mode: program BMCR directly instead of autonegotiating. */
1624 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1625 tp->link_config.speed != SPEED_INVALID) {
1626 u32 bmcr, orig_bmcr;
1628 tp->link_config.active_speed = tp->link_config.speed;
1629 tp->link_config.active_duplex = tp->link_config.duplex;
1632 switch (tp->link_config.speed) {
1638 bmcr |= BMCR_SPEED100;
1642 bmcr |= TG3_BMCR_SPEED1000;
1646 if (tp->link_config.duplex == DUPLEX_FULL)
1647 bmcr |= BMCR_FULLDPLX;
/* Only touch BMCR if it would actually change; loop waits for the
 * old link to drop before applying the new forced setting.
 */
1649 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1650 (bmcr != orig_bmcr)) {
1651 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1652 for (i = 0; i < 1500; i++) {
1656 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1657 tg3_readphy(tp, MII_BMSR, &tmp))
1659 if (!(tmp & BMSR_LSTATUS)) {
1664 tg3_writephy(tp, MII_BMCR, bmcr);
1668 tg3_writephy(tp, MII_BMCR,
1669 BMCR_ANENABLE | BMCR_ANRESTART);
/* Apply the BCM5401 PHY DSP fixups: disable tap power management and set
 * the extended-packet-length bit, via a sequence of indirect DSP
 * address/data register writes.  Returns the OR of all write statuses
 * (0 on success; the final return falls on an elided line).
 */
1673 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1677 /* Turn off tap power management. */
1678 /* Set Extended packet length bit */
1679 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1681 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1682 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1684 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1685 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1687 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1688 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1690 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1691 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1693 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1694 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/* Return whether the PHY is currently advertising the full set of
 * supported modes: all 10/100 modes in MII_ADVERTISE, plus both gigabit
 * modes in MII_TG3_CTRL unless the chip is 10/100-only.  The early
 * returns for the failure cases fall on elided lines; a read error
 * makes the answer "no".
 */
1701 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1703 u32 adv_reg, all_mask;
1705 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1708 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1709 ADVERTISE_100HALF | ADVERTISE_100FULL);
1710 if ((adv_reg & all_mask) != all_mask)
1712 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1715 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1718 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1719 MII_TG3_CTRL_ADV_1000_FULL);
1720 if ((tg3_ctrl & all_mask) != all_mask)
/* Full link bring-up / re-check for copper PHYs.
 *
 * Visible flow: clear stale MAC status bits; apply per-chip PHY errata
 * (5703/4/5 reset on link drop, BCM5401 DSP init, 5701 A0/B0 CRC
 * workaround); clear PHY interrupts and set the interrupt mask; read
 * BMSR/AUX_STAT to determine current link, speed, and duplex; decide
 * current_link_up for both autoneg and forced configurations; resolve
 * flow control on full-duplex autoneg links; restart tg3_phy_copper_begin()
 * if the link is down or we are in low-power mode; program MAC_MODE
 * (port mode, duplex, link polarity) to match; and finally sync
 * netif_carrier state, reporting transitions via tg3_link_report().
 *
 * NOTE(review): many intermediate lines (declarations, else-arms,
 * breaks, delays) are elided in this extract.
 */
1726 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1728 int current_link_up;
1737 (MAC_STATUS_SYNC_CHANGED |
1738 MAC_STATUS_CFG_CHANGED |
1739 MAC_STATUS_MI_COMPLETION |
1740 MAC_STATUS_LNKSTATE_CHANGED));
1743 tp->mi_mode = MAC_MI_MODE_BASE;
1744 tw32_f(MAC_MI_MODE, tp->mi_mode);
1747 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1749 /* Some third-party PHYs need to be reset on link going
1752 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1755 netif_carrier_ok(tp->dev)) {
/* BMSR latches link-down; read twice to get the current state. */
1756 tg3_readphy(tp, MII_BMSR, &bmsr);
1757 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1758 !(bmsr & BMSR_LSTATUS))
1764 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1765 tg3_readphy(tp, MII_BMSR, &bmsr);
1766 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1767 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1770 if (!(bmsr & BMSR_LSTATUS)) {
1771 err = tg3_init_5401phy_dsp(tp);
1775 tg3_readphy(tp, MII_BMSR, &bmsr);
1776 for (i = 0; i < 1000; i++) {
1778 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1779 (bmsr & BMSR_LSTATUS)) {
/* 5401 B0 erratum: re-reset and re-init DSP on gigabit link loss. */
1785 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1786 !(bmsr & BMSR_LSTATUS) &&
1787 tp->link_config.active_speed == SPEED_1000) {
1788 err = tg3_phy_reset(tp);
1790 err = tg3_init_5401phy_dsp(tp);
1795 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1796 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1797 /* 5701 {A0,B0} CRC bug workaround */
1798 tg3_writephy(tp, 0x15, 0x0a75);
1799 tg3_writephy(tp, 0x1c, 0x8c68);
1800 tg3_writephy(tp, 0x1c, 0x8d68);
1801 tg3_writephy(tp, 0x1c, 0x8c68);
1804 /* Clear pending interrupts... */
1805 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1806 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1808 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1809 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1811 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1814 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1815 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1816 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1817 MII_TG3_EXT_CTRL_LNK3_LED_MODE)
1819 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1822 current_link_up = 0;
1823 current_speed = SPEED_INVALID;
1824 current_duplex = DUPLEX_INVALID;
1826 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1829 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1830 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1831 if (!(val & (1 << 10))) {
1833 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
/* Poll BMSR for link-up (double read clears the latched bit). */
1839 for (i = 0; i < 100; i++) {
1840 tg3_readphy(tp, MII_BMSR, &bmsr);
1841 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1842 (bmsr & BMSR_LSTATUS))
1847 if (bmsr & BMSR_LSTATUS) {
1850 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1851 for (i = 0; i < 2000; i++) {
1853 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1858 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Read BMCR, retrying until a sane (non-zero, non-0x7fff) value. */
1863 for (i = 0; i < 200; i++) {
1864 tg3_readphy(tp, MII_BMCR, &bmcr);
1865 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1867 if (bmcr && bmcr != 0x7fff)
1872 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1873 if (bmcr & BMCR_ANENABLE) {
1874 current_link_up = 1;
1876 /* Force autoneg restart if we are exiting
1879 if (!tg3_copper_is_advertising_all(tp))
1880 current_link_up = 0;
1882 current_link_up = 0;
/* Forced mode counts as "up" only if it matches the request. */
1885 if (!(bmcr & BMCR_ANENABLE) &&
1886 tp->link_config.speed == current_speed &&
1887 tp->link_config.duplex == current_duplex) {
1888 current_link_up = 1;
1890 current_link_up = 0;
1894 tp->link_config.active_speed = current_speed;
1895 tp->link_config.active_duplex = current_duplex;
1898 if (current_link_up == 1 &&
1899 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1900 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1901 u32 local_adv, remote_adv;
1903 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1905 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1907 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1910 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1912 /* If we are not advertising full pause capability,
1913 * something is wrong. Bring the link down and reconfigure.
1915 if (local_adv != ADVERTISE_PAUSE_CAP) {
1916 current_link_up = 0;
1918 tg3_setup_flow_control(tp, local_adv, remote_adv);
1922 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1925 tg3_phy_copper_begin(tp);
1927 tg3_readphy(tp, MII_BMSR, &tmp);
1928 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1929 (tmp & BMSR_LSTATUS))
1930 current_link_up = 1;
/* Mirror the negotiated result into the MAC's port/duplex config. */
1933 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1934 if (current_link_up == 1) {
1935 if (tp->link_config.active_speed == SPEED_100 ||
1936 tp->link_config.active_speed == SPEED_10)
1937 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1939 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1941 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1943 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1944 if (tp->link_config.active_duplex == DUPLEX_HALF)
1945 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1947 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1949 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1950 (current_link_up == 1 &&
1951 tp->link_config.active_speed == SPEED_10))
1952 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1954 if (current_link_up == 1)
1955 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1958 /* ??? Without this setting Netgear GA302T PHY does not
1959 * ??? send/receive packets...
1961 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1962 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1963 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1964 tw32_f(MAC_MI_MODE, tp->mi_mode);
1968 tw32_f(MAC_MODE, tp->mac_mode);
1971 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1972 /* Polled via timer. */
1973 tw32_f(MAC_EVENT, 0);
1975 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 @ gigabit on PCI-X / fast PCI: extra status clears + firmware
 * mailbox magic (details partly elided here).
 */
1979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1980 current_link_up == 1 &&
1981 tp->link_config.active_speed == SPEED_1000 &&
1982 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1983 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1986 (MAC_STATUS_SYNC_CHANGED |
1987 MAC_STATUS_CFG_CHANGED));
1990 NIC_SRAM_FIRMWARE_MBOX,
1991 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1994 if (current_link_up != netif_carrier_ok(tp->dev)) {
1995 if (current_link_up)
1996 netif_carrier_on(tp->dev);
1998 netif_carrier_off(tp->dev);
1999 tg3_link_report(tp);
/* State kept by the software 1000BaseX autonegotiation state machine
 * (tg3_fiber_aneg_smachine) for fiber/TBI links that lack hardware
 * autoneg support.  The ANEG_STATE_*, MR_*, and ANEG_CFG_* values below
 * are scoped to this struct's fields.
 */
2005 struct tg3_fiber_aneginfo {
/* Values for the 'state' field: IEEE 802.3 clause 37-style states. */
2007 #define ANEG_STATE_UNKNOWN 0
2008 #define ANEG_STATE_AN_ENABLE 1
2009 #define ANEG_STATE_RESTART_INIT 2
2010 #define ANEG_STATE_RESTART 3
2011 #define ANEG_STATE_DISABLE_LINK_OK 4
2012 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2013 #define ANEG_STATE_ABILITY_DETECT 6
2014 #define ANEG_STATE_ACK_DETECT_INIT 7
2015 #define ANEG_STATE_ACK_DETECT 8
2016 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2017 #define ANEG_STATE_COMPLETE_ACK 10
2018 #define ANEG_STATE_IDLE_DETECT_INIT 11
2019 #define ANEG_STATE_IDLE_DETECT 12
2020 #define ANEG_STATE_LINK_OK 13
2021 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2022 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* Bits for the 'flags' field ("MR" = management register, clause 37):
 * control bits, progress bits, and decoded link-partner abilities.
 */
2025 #define MR_AN_ENABLE 0x00000001
2026 #define MR_RESTART_AN 0x00000002
2027 #define MR_AN_COMPLETE 0x00000004
2028 #define MR_PAGE_RX 0x00000008
2029 #define MR_NP_LOADED 0x00000010
2030 #define MR_TOGGLE_TX 0x00000020
2031 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2032 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2033 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2034 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2035 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2036 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2037 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2038 #define MR_TOGGLE_RX 0x00002000
2039 #define MR_NP_RX 0x00004000
2041 #define MR_LINK_OK 0x80000000
/* Timestamps (in state-machine ticks) used for settle-time checks. */
2043 unsigned long link_time, cur_time;
/* Last stable RX config word and how many times it repeated. */
2045 u32 ability_match_cfg;
2046 int ability_match_count;
2048 char ability_match, idle_match, ack_match;
/* TX/RX config code words; ANEG_CFG_* are their bit meanings. */
2050 u32 txconfig, rxconfig;
2051 #define ANEG_CFG_NP 0x00000080
2052 #define ANEG_CFG_ACK 0x00000040
2053 #define ANEG_CFG_RF2 0x00000020
2054 #define ANEG_CFG_RF1 0x00000010
2055 #define ANEG_CFG_PS2 0x00000001
2056 #define ANEG_CFG_PS1 0x00008000
2057 #define ANEG_CFG_HD 0x00004000
2058 #define ANEG_CFG_FD 0x00002000
2059 #define ANEG_CFG_INVAL 0x00001f06
/* State-machine return codes and the settle timeout (in ticks). */
2064 #define ANEG_TIMER_ENAB 2
2065 #define ANEG_FAILED -1
2067 #define ANEG_STATE_SETTLE_TIME 10000
/* One step of the software 1000BaseX autonegotiation state machine
 * (IEEE 802.3 clause 37 style).  Called repeatedly by fiber_autoneg()
 * until it returns ANEG_DONE or ANEG_FAILED; ANEG_TIMER_ENAB means
 * "keep ticking".  Before dispatching on ap->state it samples the
 * received config word from MAC_RX_AUTO_NEG and maintains the
 * ability/ack match detectors.
 * NOTE(review): some case bodies, breaks, and else-arms are elided in
 * this extract; comments describe only the visible code.
 */
2069 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2070 struct tg3_fiber_aneginfo *ap)
2072 unsigned long delta;
2076 if (ap->state == ANEG_STATE_UNKNOWN) {
2080 ap->ability_match_cfg = 0;
2081 ap->ability_match_count = 0;
2082 ap->ability_match = 0;
/* Sample the incoming config word; ability_match latches once the
 * same word has been seen more than once in a row.
 */
2088 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2089 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2091 if (rx_cfg_reg != ap->ability_match_cfg) {
2092 ap->ability_match_cfg = rx_cfg_reg;
2093 ap->ability_match = 0;
2094 ap->ability_match_count = 0;
2096 if (++ap->ability_match_count > 1) {
2097 ap->ability_match = 1;
2098 ap->ability_match_cfg = rx_cfg_reg;
2101 if (rx_cfg_reg & ANEG_CFG_ACK)
/* No config word received: reset the detectors. */
2109 ap->ability_match_cfg = 0;
2110 ap->ability_match_count = 0;
2111 ap->ability_match = 0;
2117 ap->rxconfig = rx_cfg_reg;
2121 case ANEG_STATE_UNKNOWN:
2122 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2123 ap->state = ANEG_STATE_AN_ENABLE;
2126 case ANEG_STATE_AN_ENABLE:
2127 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2128 if (ap->flags & MR_AN_ENABLE) {
2131 ap->ability_match_cfg = 0;
2132 ap->ability_match_count = 0;
2133 ap->ability_match = 0;
2137 ap->state = ANEG_STATE_RESTART_INIT;
2139 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2143 case ANEG_STATE_RESTART_INIT:
2144 ap->link_time = ap->cur_time;
2145 ap->flags &= ~(MR_NP_LOADED);
/* Transmit a null config word while restarting. */
2147 tw32(MAC_TX_AUTO_NEG, 0);
2148 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2149 tw32_f(MAC_MODE, tp->mac_mode);
2152 ret = ANEG_TIMER_ENAB;
2153 ap->state = ANEG_STATE_RESTART;
2156 case ANEG_STATE_RESTART:
2157 delta = ap->cur_time - ap->link_time;
2158 if (delta > ANEG_STATE_SETTLE_TIME) {
2159 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2161 ret = ANEG_TIMER_ENAB;
2165 case ANEG_STATE_DISABLE_LINK_OK:
2169 case ANEG_STATE_ABILITY_DETECT_INIT:
2170 ap->flags &= ~(MR_TOGGLE_TX);
/* Advertise full duplex + symmetric pause. */
2171 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2172 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2173 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2174 tw32_f(MAC_MODE, tp->mac_mode);
2177 ap->state = ANEG_STATE_ABILITY_DETECT;
2180 case ANEG_STATE_ABILITY_DETECT:
2181 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2182 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2186 case ANEG_STATE_ACK_DETECT_INIT:
2187 ap->txconfig |= ANEG_CFG_ACK;
2188 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2189 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2190 tw32_f(MAC_MODE, tp->mac_mode);
2193 ap->state = ANEG_STATE_ACK_DETECT;
2196 case ANEG_STATE_ACK_DETECT:
2197 if (ap->ack_match != 0) {
2198 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2199 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2200 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2202 ap->state = ANEG_STATE_AN_ENABLE;
2204 } else if (ap->ability_match != 0 &&
2205 ap->rxconfig == 0) {
2206 ap->state = ANEG_STATE_AN_ENABLE;
2210 case ANEG_STATE_COMPLETE_ACK_INIT:
/* Reject config words with reserved/invalid bits set. */
2211 if (ap->rxconfig & ANEG_CFG_INVAL) {
2215 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2216 MR_LP_ADV_HALF_DUPLEX |
2217 MR_LP_ADV_SYM_PAUSE |
2218 MR_LP_ADV_ASYM_PAUSE |
2219 MR_LP_ADV_REMOTE_FAULT1 |
2220 MR_LP_ADV_REMOTE_FAULT2 |
2221 MR_LP_ADV_NEXT_PAGE |
/* Decode the partner's ability bits into MR_LP_ADV_* flags. */
2224 if (ap->rxconfig & ANEG_CFG_FD)
2225 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2226 if (ap->rxconfig & ANEG_CFG_HD)
2227 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2228 if (ap->rxconfig & ANEG_CFG_PS1)
2229 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2230 if (ap->rxconfig & ANEG_CFG_PS2)
2231 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2232 if (ap->rxconfig & ANEG_CFG_RF1)
2233 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2234 if (ap->rxconfig & ANEG_CFG_RF2)
2235 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2236 if (ap->rxconfig & ANEG_CFG_NP)
2237 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2239 ap->link_time = ap->cur_time;
2241 ap->flags ^= (MR_TOGGLE_TX);
2242 if (ap->rxconfig & 0x0008)
2243 ap->flags |= MR_TOGGLE_RX;
2244 if (ap->rxconfig & ANEG_CFG_NP)
2245 ap->flags |= MR_NP_RX;
2246 ap->flags |= MR_PAGE_RX;
2248 ap->state = ANEG_STATE_COMPLETE_ACK;
2249 ret = ANEG_TIMER_ENAB;
2252 case ANEG_STATE_COMPLETE_ACK:
2253 if (ap->ability_match != 0 &&
2254 ap->rxconfig == 0) {
2255 ap->state = ANEG_STATE_AN_ENABLE;
2258 delta = ap->cur_time - ap->link_time;
2259 if (delta > ANEG_STATE_SETTLE_TIME) {
2260 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2261 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2263 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2264 !(ap->flags & MR_NP_RX)) {
2265 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2273 case ANEG_STATE_IDLE_DETECT_INIT:
2274 ap->link_time = ap->cur_time;
/* Stop sending config words; transition to idle detection. */
2275 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2276 tw32_f(MAC_MODE, tp->mac_mode);
2279 ap->state = ANEG_STATE_IDLE_DETECT;
2280 ret = ANEG_TIMER_ENAB;
2283 case ANEG_STATE_IDLE_DETECT:
2284 if (ap->ability_match != 0 &&
2285 ap->rxconfig == 0) {
2286 ap->state = ANEG_STATE_AN_ENABLE;
2289 delta = ap->cur_time - ap->link_time;
2290 if (delta > ANEG_STATE_SETTLE_TIME) {
2291 /* XXX another gem from the Broadcom driver :( */
2292 ap->state = ANEG_STATE_LINK_OK;
2296 case ANEG_STATE_LINK_OK:
2297 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2301 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2302 /* ??? unimplemented */
2305 case ANEG_STATE_NEXT_PAGE_WAIT:
2306 /* ??? unimplemented */
/* Run the software fiber autoneg state machine to completion (bounded
 * by ~195000 ticks).  On exit, *flags holds the resulting MR_* flags.
 * Returns non-zero on success, i.e. ANEG_DONE with AN_COMPLETE, LINK_OK,
 * and partner full-duplex all set (the return statements themselves are
 * elided in this extract).
 */
2317 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2320 struct tg3_fiber_aneginfo aninfo;
2321 int status = ANEG_FAILED;
/* Start from a clean slate: null TX config word, GMII port mode. */
2325 tw32_f(MAC_TX_AUTO_NEG, 0);
2327 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2328 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2331 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2334 memset(&aninfo, 0, sizeof(aninfo));
2335 aninfo.flags |= MR_AN_ENABLE;
2336 aninfo.state = ANEG_STATE_UNKNOWN;
2337 aninfo.cur_time = 0;
2339 while (++tick < 195000) {
2340 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2341 if (status == ANEG_DONE || status == ANEG_FAILED)
/* Stop sending config words once negotiation ends. */
2347 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2348 tw32_f(MAC_MODE, tp->mac_mode);
2351 *flags = aninfo.flags;
2353 if (status == ANEG_DONE &&
2354 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2355 MR_LP_ADV_FULL_DUPLEX)))
/* Initialize the BCM8002 SerDes PHY: reset it (unless already synced
 * after init), set the PLL lock range, select register banks, enable
 * auto-lock/comdet, pulse POR, and wait for the signal to stabilize.
 * Register numbers/values are vendor-specific magic for this part.
 */
2361 static void tg3_init_bcm8002(struct tg3 *tp)
2363 u32 mac_status = tr32(MAC_STATUS);
2366 /* Reset when initting first time or we have a link. */
2367 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2368 !(mac_status & MAC_STATUS_PCS_SYNCED))
2371 /* Set PLL lock range. */
2372 tg3_writephy(tp, 0x16, 0x8007);
2375 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2377 /* Wait for reset to complete. */
2378 /* XXX schedule_timeout() ... */
2379 for (i = 0; i < 500; i++)
2382 /* Config mode; select PMA/Ch 1 regs. */
2383 tg3_writephy(tp, 0x10, 0x8411);
2385 /* Enable auto-lock and comdet, select txclk for tx. */
2386 tg3_writephy(tp, 0x11, 0x0a10);
2388 tg3_writephy(tp, 0x18, 0x00a0);
2389 tg3_writephy(tp, 0x16, 0x41ff);
2391 /* Assert and deassert POR. */
2392 tg3_writephy(tp, 0x13, 0x0400);
2394 tg3_writephy(tp, 0x13, 0x0000);
2396 tg3_writephy(tp, 0x11, 0x0a50);
2398 tg3_writephy(tp, 0x11, 0x0a10);
2400 /* Wait for signal to stabilize */
2401 /* XXX schedule_timeout() ... */
2402 for (i = 0; i < 15000; i++)
2405 /* Deselect the channel register so we can read the PHYID
2408 tg3_writephy(tp, 0x10, 0x8011);
/* Fiber link bring-up using the SerDes digital (SG_DIG) hardware
 * autonegotiation block.  Returns 1 if the link is up, else 0.
 * Handles three cases: forced mode (autoneg off), (re)arming hardware
 * autoneg when SG_DIG_CTRL is not yet in the expected state, and
 * reading back the autoneg result / falling back to parallel detection.
 * A 5704-specific serdes_cfg workaround is applied when needed.
 * NOTE(review): several else-arms and delays are elided in this extract.
 */
2411 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2413 u32 sg_dig_ctrl, sg_dig_status;
2414 u32 serdes_cfg, expected_sg_dig_ctrl;
2415 int workaround, port_a;
2416 int current_link_up;
2419 expected_sg_dig_ctrl = 0;
2422 current_link_up = 0;
/* Workaround only applies to 5704 revs after A0/A1. */
2424 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2425 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2427 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2430 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2431 /* preserve bits 20-23 for voltage regulator */
2432 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2435 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2437 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
/* Autoneg currently enabled in hardware but not wanted: disable. */
2438 if (sg_dig_ctrl & (1 << 31)) {
2440 u32 val = serdes_cfg;
2446 tw32_f(MAC_SERDES_CFG, val);
2448 tw32_f(SG_DIG_CTRL, 0x01388400);
2450 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2451 tg3_setup_flow_control(tp, 0, 0);
2452 current_link_up = 1;
2457 /* Want auto-negotiation. */
2458 expected_sg_dig_ctrl = 0x81388400;
2460 /* Pause capability */
2461 expected_sg_dig_ctrl |= (1 << 11);
2463 /* Asymettric pause */
2464 expected_sg_dig_ctrl |= (1 << 12);
2466 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
/* Re-arm hardware autoneg: pulse the restart bit (1 << 30). */
2468 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2469 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2471 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2473 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2474 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2475 MAC_STATUS_SIGNAL_DET)) {
2478 /* Giver time to negotiate (~200ms) */
2479 for (i = 0; i < 40000; i++) {
2480 sg_dig_status = tr32(SG_DIG_STATUS);
2481 if (sg_dig_status & (0x3))
2485 mac_status = tr32(MAC_STATUS);
/* Autoneg completed and PCS synced: decode partner pause bits. */
2487 if ((sg_dig_status & (1 << 1)) &&
2488 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2489 u32 local_adv, remote_adv;
2491 local_adv = ADVERTISE_PAUSE_CAP;
2493 if (sg_dig_status & (1 << 19))
2494 remote_adv |= LPA_PAUSE_CAP;
2495 if (sg_dig_status & (1 << 20))
2496 remote_adv |= LPA_PAUSE_ASYM;
2498 tg3_setup_flow_control(tp, local_adv, remote_adv);
2499 current_link_up = 1;
2500 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2501 } else if (!(sg_dig_status & (1 << 1))) {
2502 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2503 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2506 u32 val = serdes_cfg;
2513 tw32_f(MAC_SERDES_CFG, val);
2516 tw32_f(SG_DIG_CTRL, 0x01388400);
2519 /* Link parallel detection - link is up */
2520 /* only if we have PCS_SYNC and not */
2521 /* receiving config code words */
2522 mac_status = tr32(MAC_STATUS);
2523 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2524 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2525 tg3_setup_flow_control(tp, 0, 0);
2526 current_link_up = 1;
2533 return current_link_up;
/* Fiber link bring-up without hardware autoneg support.  With autoneg
 * enabled it runs the software state machine (fiber_autoneg), resolves
 * flow control from the MR_LP_ADV_* result, then clears the latched
 * sync/config-changed status; it also accepts parallel-detected links
 * (PCS synced, no config words).  With autoneg disabled it simply
 * forces 1000FD up.  Returns 1 if the link is up, else 0.
 */
2536 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2538 int current_link_up = 0;
2540 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2541 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2545 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2549 if (fiber_autoneg(tp, &flags)) {
2550 u32 local_adv, remote_adv;
2552 local_adv = ADVERTISE_PAUSE_CAP;
2554 if (flags & MR_LP_ADV_SYM_PAUSE)
2555 remote_adv |= LPA_PAUSE_CAP;
2556 if (flags & MR_LP_ADV_ASYM_PAUSE)
2557 remote_adv |= LPA_PAUSE_ASYM;
2559 tg3_setup_flow_control(tp, local_adv, remote_adv);
2561 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2562 current_link_up = 1;
/* Write-1-to-clear the latched status until it stays clear. */
2564 for (i = 0; i < 30; i++) {
2567 (MAC_STATUS_SYNC_CHANGED |
2568 MAC_STATUS_CFG_CHANGED));
2570 if ((tr32(MAC_STATUS) &
2571 (MAC_STATUS_SYNC_CHANGED |
2572 MAC_STATUS_CFG_CHANGED)) == 0)
2576 mac_status = tr32(MAC_STATUS);
2577 if (current_link_up == 0 &&
2578 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2579 !(mac_status & MAC_STATUS_RCVD_CFG))
2580 current_link_up = 1;
2582 /* Forcing 1000FD link up. */
2583 current_link_up = 1;
2584 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2586 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2591 return current_link_up;
2594 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2597 u16 orig_active_speed;
2598 u8 orig_active_duplex;
2600 int current_link_up;
2604 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2605 TG3_FLAG_TX_PAUSE));
2606 orig_active_speed = tp->link_config.active_speed;
2607 orig_active_duplex = tp->link_config.active_duplex;
2609 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2610 netif_carrier_ok(tp->dev) &&
2611 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2612 mac_status = tr32(MAC_STATUS);
2613 mac_status &= (MAC_STATUS_PCS_SYNCED |
2614 MAC_STATUS_SIGNAL_DET |
2615 MAC_STATUS_CFG_CHANGED |
2616 MAC_STATUS_RCVD_CFG);
2617 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2618 MAC_STATUS_SIGNAL_DET)) {
2619 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2620 MAC_STATUS_CFG_CHANGED));
2625 tw32_f(MAC_TX_AUTO_NEG, 0);
2627 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2628 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2629 tw32_f(MAC_MODE, tp->mac_mode);
2632 if (tp->phy_id == PHY_ID_BCM8002)
2633 tg3_init_bcm8002(tp);
2635 /* Enable link change event even when serdes polling. */
2636 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2639 current_link_up = 0;
2640 mac_status = tr32(MAC_STATUS);
2642 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2643 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2645 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2647 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2648 tw32_f(MAC_MODE, tp->mac_mode);
2651 tp->hw_status->status =
2652 (SD_STATUS_UPDATED |
2653 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2655 for (i = 0; i < 100; i++) {
2656 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2657 MAC_STATUS_CFG_CHANGED));
2659 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2660 MAC_STATUS_CFG_CHANGED)) == 0)
2664 mac_status = tr32(MAC_STATUS);
2665 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2666 current_link_up = 0;
2667 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2668 tw32_f(MAC_MODE, (tp->mac_mode |
2669 MAC_MODE_SEND_CONFIGS));
2671 tw32_f(MAC_MODE, tp->mac_mode);
2675 if (current_link_up == 1) {
2676 tp->link_config.active_speed = SPEED_1000;
2677 tp->link_config.active_duplex = DUPLEX_FULL;
2678 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2679 LED_CTRL_LNKLED_OVERRIDE |
2680 LED_CTRL_1000MBPS_ON));
2682 tp->link_config.active_speed = SPEED_INVALID;
2683 tp->link_config.active_duplex = DUPLEX_INVALID;
2684 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2685 LED_CTRL_LNKLED_OVERRIDE |
2686 LED_CTRL_TRAFFIC_OVERRIDE));
2689 if (current_link_up != netif_carrier_ok(tp->dev)) {
2690 if (current_link_up)
2691 netif_carrier_on(tp->dev);
2693 netif_carrier_off(tp->dev);
2694 tg3_link_report(tp);
2697 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2699 if (orig_pause_cfg != now_pause_cfg ||
2700 orig_active_speed != tp->link_config.active_speed ||
2701 orig_active_duplex != tp->link_config.active_duplex)
2702 tg3_link_report(tp);
2708 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2710 int current_link_up, err = 0;
2715 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2716 tw32_f(MAC_MODE, tp->mac_mode);
2722 (MAC_STATUS_SYNC_CHANGED |
2723 MAC_STATUS_CFG_CHANGED |
2724 MAC_STATUS_MI_COMPLETION |
2725 MAC_STATUS_LNKSTATE_CHANGED));
2731 current_link_up = 0;
2732 current_speed = SPEED_INVALID;
2733 current_duplex = DUPLEX_INVALID;
2735 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2736 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2737 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2738 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2739 bmsr |= BMSR_LSTATUS;
2741 bmsr &= ~BMSR_LSTATUS;
2744 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2746 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2747 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2748 /* do nothing, just check for link up at the end */
2749 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2752 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2753 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2754 ADVERTISE_1000XPAUSE |
2755 ADVERTISE_1000XPSE_ASYM |
2758 /* Always advertise symmetric PAUSE just like copper */
2759 new_adv |= ADVERTISE_1000XPAUSE;
2761 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2762 new_adv |= ADVERTISE_1000XHALF;
2763 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2764 new_adv |= ADVERTISE_1000XFULL;
2766 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2767 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2768 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2769 tg3_writephy(tp, MII_BMCR, bmcr);
2771 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2772 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2773 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2780 bmcr &= ~BMCR_SPEED1000;
2781 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2783 if (tp->link_config.duplex == DUPLEX_FULL)
2784 new_bmcr |= BMCR_FULLDPLX;
2786 if (new_bmcr != bmcr) {
2787 /* BMCR_SPEED1000 is a reserved bit that needs
2788 * to be set on write.
2790 new_bmcr |= BMCR_SPEED1000;
2792 /* Force a linkdown */
2793 if (netif_carrier_ok(tp->dev)) {
2796 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2797 adv &= ~(ADVERTISE_1000XFULL |
2798 ADVERTISE_1000XHALF |
2800 tg3_writephy(tp, MII_ADVERTISE, adv);
2801 tg3_writephy(tp, MII_BMCR, bmcr |
2805 netif_carrier_off(tp->dev);
2807 tg3_writephy(tp, MII_BMCR, new_bmcr);
2809 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2810 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2811 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2813 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2814 bmsr |= BMSR_LSTATUS;
2816 bmsr &= ~BMSR_LSTATUS;
2818 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2822 if (bmsr & BMSR_LSTATUS) {
2823 current_speed = SPEED_1000;
2824 current_link_up = 1;
2825 if (bmcr & BMCR_FULLDPLX)
2826 current_duplex = DUPLEX_FULL;
2828 current_duplex = DUPLEX_HALF;
2830 if (bmcr & BMCR_ANENABLE) {
2831 u32 local_adv, remote_adv, common;
2833 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2834 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2835 common = local_adv & remote_adv;
2836 if (common & (ADVERTISE_1000XHALF |
2837 ADVERTISE_1000XFULL)) {
2838 if (common & ADVERTISE_1000XFULL)
2839 current_duplex = DUPLEX_FULL;
2841 current_duplex = DUPLEX_HALF;
2843 tg3_setup_flow_control(tp, local_adv,
2847 current_link_up = 0;
2851 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2852 if (tp->link_config.active_duplex == DUPLEX_HALF)
2853 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2855 tw32_f(MAC_MODE, tp->mac_mode);
2858 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2860 tp->link_config.active_speed = current_speed;
2861 tp->link_config.active_duplex = current_duplex;
2863 if (current_link_up != netif_carrier_ok(tp->dev)) {
2864 if (current_link_up)
2865 netif_carrier_on(tp->dev);
2867 netif_carrier_off(tp->dev);
2868 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2870 tg3_link_report(tp);
2875 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2877 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2878 /* Give autoneg time to complete. */
2879 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2882 if (!netif_carrier_ok(tp->dev) &&
2883 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2886 tg3_readphy(tp, MII_BMCR, &bmcr);
2887 if (bmcr & BMCR_ANENABLE) {
2890 /* Select shadow register 0x1f */
2891 tg3_writephy(tp, 0x1c, 0x7c00);
2892 tg3_readphy(tp, 0x1c, &phy1);
2894 /* Select expansion interrupt status register */
2895 tg3_writephy(tp, 0x17, 0x0f01);
2896 tg3_readphy(tp, 0x15, &phy2);
2897 tg3_readphy(tp, 0x15, &phy2);
2899 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2900 /* We have signal detect and not receiving
2901 * config code words, link is up by parallel
2905 bmcr &= ~BMCR_ANENABLE;
2906 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2907 tg3_writephy(tp, MII_BMCR, bmcr);
2908 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2912 else if (netif_carrier_ok(tp->dev) &&
2913 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2914 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2917 /* Select expansion interrupt status register */
2918 tg3_writephy(tp, 0x17, 0x0f01);
2919 tg3_readphy(tp, 0x15, &phy2);
2923 /* Config code words received, turn on autoneg. */
2924 tg3_readphy(tp, MII_BMCR, &bmcr);
2925 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2927 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2933 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2937 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2938 err = tg3_setup_fiber_phy(tp, force_reset);
2939 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2940 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2942 err = tg3_setup_copper_phy(tp, force_reset);
2945 if (tp->link_config.active_speed == SPEED_1000 &&
2946 tp->link_config.active_duplex == DUPLEX_HALF)
2947 tw32(MAC_TX_LENGTHS,
2948 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2949 (6 << TX_LENGTHS_IPG_SHIFT) |
2950 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2952 tw32(MAC_TX_LENGTHS,
2953 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2954 (6 << TX_LENGTHS_IPG_SHIFT) |
2955 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2957 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2958 if (netif_carrier_ok(tp->dev)) {
2959 tw32(HOSTCC_STAT_COAL_TICKS,
2960 tp->coal.stats_block_coalesce_usecs);
2962 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2969 /* This is called whenever we suspect that the system chipset is re-
2970 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2971 * is bogus tx completions. We try to recover by setting the
2972 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2975 static void tg3_tx_recover(struct tg3 *tp)
2977 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2978 tp->write32_tx_mbox == tg3_write_indirect_mbox);
2980 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2981 "mapped I/O cycles to the network device, attempting to "
2982 "recover. Please report the problem to the driver maintainer "
2983 "and include system chipset information.\n", tp->dev->name);
2985 spin_lock(&tp->lock);
2986 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2987 spin_unlock(&tp->lock);
2990 /* Tigon3 never reports partial packet sends. So we do not
2991 * need special logic to handle SKBs that have not had all
2992 * of their frags sent yet, like SunGEM does.
2994 static void tg3_tx(struct tg3 *tp)
2996 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2997 u32 sw_idx = tp->tx_cons;
2999 while (sw_idx != hw_idx) {
3000 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3001 struct sk_buff *skb = ri->skb;
3004 if (unlikely(skb == NULL)) {
3009 pci_unmap_single(tp->pdev,
3010 pci_unmap_addr(ri, mapping),
3016 sw_idx = NEXT_TX(sw_idx);
3018 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3019 ri = &tp->tx_buffers[sw_idx];
3020 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3023 pci_unmap_page(tp->pdev,
3024 pci_unmap_addr(ri, mapping),
3025 skb_shinfo(skb)->frags[i].size,
3028 sw_idx = NEXT_TX(sw_idx);
3033 if (unlikely(tx_bug)) {
3039 tp->tx_cons = sw_idx;
3041 if (unlikely(netif_queue_stopped(tp->dev))) {
3042 spin_lock(&tp->tx_lock);
3043 if (netif_queue_stopped(tp->dev) &&
3044 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3045 netif_wake_queue(tp->dev);
3046 spin_unlock(&tp->tx_lock);
3050 /* Returns size of skb allocated or < 0 on error.
3052 * We only need to fill in the address because the other members
3053 * of the RX descriptor are invariant, see tg3_init_rings.
3055 * Note the purposeful asymmetry of cpu vs. chip accesses. For
3056 * posting buffers we only dirty the first cache line of the RX
3057 * descriptor (containing the address). Whereas for the RX status
3058 * buffers the cpu only reads the last cacheline of the RX descriptor
3059 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3061 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3062 int src_idx, u32 dest_idx_unmasked)
3064 struct tg3_rx_buffer_desc *desc;
3065 struct ring_info *map, *src_map;
3066 struct sk_buff *skb;
3068 int skb_size, dest_idx;
3071 switch (opaque_key) {
3072 case RXD_OPAQUE_RING_STD:
3073 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3074 desc = &tp->rx_std[dest_idx];
3075 map = &tp->rx_std_buffers[dest_idx];
3077 src_map = &tp->rx_std_buffers[src_idx];
3078 skb_size = tp->rx_pkt_buf_sz;
3081 case RXD_OPAQUE_RING_JUMBO:
3082 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3083 desc = &tp->rx_jumbo[dest_idx];
3084 map = &tp->rx_jumbo_buffers[dest_idx];
3086 src_map = &tp->rx_jumbo_buffers[src_idx];
3087 skb_size = RX_JUMBO_PKT_BUF_SZ;
3094 /* Do not overwrite any of the map or rp information
3095 * until we are sure we can commit to a new buffer.
3097 * Callers depend upon this behavior and assume that
3098 * we leave everything unchanged if we fail.
3100 skb = netdev_alloc_skb(tp->dev, skb_size);
3104 skb_reserve(skb, tp->rx_offset);
3106 mapping = pci_map_single(tp->pdev, skb->data,
3107 skb_size - tp->rx_offset,
3108 PCI_DMA_FROMDEVICE);
3111 pci_unmap_addr_set(map, mapping, mapping);
3113 if (src_map != NULL)
3114 src_map->skb = NULL;
3116 desc->addr_hi = ((u64)mapping >> 32);
3117 desc->addr_lo = ((u64)mapping & 0xffffffff);
3122 /* We only need to move over in the address because the other
3123 * members of the RX descriptor are invariant. See notes above
3124 * tg3_alloc_rx_skb for full details.
3126 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3127 int src_idx, u32 dest_idx_unmasked)
3129 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3130 struct ring_info *src_map, *dest_map;
3133 switch (opaque_key) {
3134 case RXD_OPAQUE_RING_STD:
3135 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3136 dest_desc = &tp->rx_std[dest_idx];
3137 dest_map = &tp->rx_std_buffers[dest_idx];
3138 src_desc = &tp->rx_std[src_idx];
3139 src_map = &tp->rx_std_buffers[src_idx];
3142 case RXD_OPAQUE_RING_JUMBO:
3143 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3144 dest_desc = &tp->rx_jumbo[dest_idx];
3145 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3146 src_desc = &tp->rx_jumbo[src_idx];
3147 src_map = &tp->rx_jumbo_buffers[src_idx];
3154 dest_map->skb = src_map->skb;
3155 pci_unmap_addr_set(dest_map, mapping,
3156 pci_unmap_addr(src_map, mapping));
3157 dest_desc->addr_hi = src_desc->addr_hi;
3158 dest_desc->addr_lo = src_desc->addr_lo;
3160 src_map->skb = NULL;
3163 #if TG3_VLAN_TAG_USED
3164 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3166 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3170 /* The RX ring scheme is composed of multiple rings which post fresh
3171 * buffers to the chip, and one special ring the chip uses to report
3172 * status back to the host.
3174 * The special ring reports the status of received packets to the
3175 * host. The chip does not write into the original descriptor the
3176 * RX buffer was obtained from. The chip simply takes the original
3177 * descriptor as provided by the host, updates the status and length
3178 * field, then writes this into the next status ring entry.
3180 * Each ring the host uses to post buffers to the chip is described
3181 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3182 * it is first placed into the on-chip ram. When the packet's length
3183 * is known, it walks down the TG3_BDINFO entries to select the ring.
3184 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3185 * which is within the range of the new packet's length is chosen.
3187 * The "separate ring for rx status" scheme may sound queer, but it makes
3188 * sense from a cache coherency perspective. If only the host writes
3189 * to the buffer post rings, and only the chip writes to the rx status
3190 * rings, then cache lines never move beyond shared-modified state.
3191 * If both the host and chip were to write into the same ring, cache line
3192 * eviction could occur since both entities want it in an exclusive state.
3194 static int tg3_rx(struct tg3 *tp, int budget)
3196 u32 work_mask, rx_std_posted = 0;
3197 u32 sw_idx = tp->rx_rcb_ptr;
3201 hw_idx = tp->hw_status->idx[0].rx_producer;
3203 * We need to order the read of hw_idx and the read of
3204 * the opaque cookie.
3209 while (sw_idx != hw_idx && budget > 0) {
3210 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3212 struct sk_buff *skb;
3213 dma_addr_t dma_addr;
3214 u32 opaque_key, desc_idx, *post_ptr;
3216 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3217 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3218 if (opaque_key == RXD_OPAQUE_RING_STD) {
3219 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3221 skb = tp->rx_std_buffers[desc_idx].skb;
3222 post_ptr = &tp->rx_std_ptr;
3224 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3225 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3227 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3228 post_ptr = &tp->rx_jumbo_ptr;
3231 goto next_pkt_nopost;
3234 work_mask |= opaque_key;
3236 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3237 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3239 tg3_recycle_rx(tp, opaque_key,
3240 desc_idx, *post_ptr);
3242 /* Other statistics kept track of by card. */
3243 tp->net_stats.rx_dropped++;
3247 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3249 if (len > RX_COPY_THRESHOLD
3250 && tp->rx_offset == 2
3251 /* rx_offset != 2 iff this is a 5701 card running
3252 * in PCI-X mode [see tg3_get_invariants()] */
3256 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3257 desc_idx, *post_ptr);
3261 pci_unmap_single(tp->pdev, dma_addr,
3262 skb_size - tp->rx_offset,
3263 PCI_DMA_FROMDEVICE);
3267 struct sk_buff *copy_skb;
3269 tg3_recycle_rx(tp, opaque_key,
3270 desc_idx, *post_ptr);
3272 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3273 if (copy_skb == NULL)
3274 goto drop_it_no_recycle;
3276 skb_reserve(copy_skb, 2);
3277 skb_put(copy_skb, len);
3278 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3279 memcpy(copy_skb->data, skb->data, len);
3280 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3282 /* We'll reuse the original ring buffer. */
3286 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3287 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3288 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3289 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3290 skb->ip_summed = CHECKSUM_UNNECESSARY;
3292 skb->ip_summed = CHECKSUM_NONE;
3294 skb->protocol = eth_type_trans(skb, tp->dev);
3295 #if TG3_VLAN_TAG_USED
3296 if (tp->vlgrp != NULL &&
3297 desc->type_flags & RXD_FLAG_VLAN) {
3298 tg3_vlan_rx(tp, skb,
3299 desc->err_vlan & RXD_VLAN_MASK);
3302 netif_receive_skb(skb);
3304 tp->dev->last_rx = jiffies;
3311 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3312 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3314 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3315 TG3_64BIT_REG_LOW, idx);
3316 work_mask &= ~RXD_OPAQUE_RING_STD;
3321 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3323 /* Refresh hw_idx to see if there is new work */
3324 if (sw_idx == hw_idx) {
3325 hw_idx = tp->hw_status->idx[0].rx_producer;
3330 /* ACK the status ring. */
3331 tp->rx_rcb_ptr = sw_idx;
3332 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3334 /* Refill RX ring(s). */
3335 if (work_mask & RXD_OPAQUE_RING_STD) {
3336 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3337 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3340 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3341 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3342 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3350 static int tg3_poll(struct net_device *netdev, int *budget)
3352 struct tg3 *tp = netdev_priv(netdev);
3353 struct tg3_hw_status *sblk = tp->hw_status;
3356 /* handle link change and other phy events */
3357 if (!(tp->tg3_flags &
3358 (TG3_FLAG_USE_LINKCHG_REG |
3359 TG3_FLAG_POLL_SERDES))) {
3360 if (sblk->status & SD_STATUS_LINK_CHG) {
3361 sblk->status = SD_STATUS_UPDATED |
3362 (sblk->status & ~SD_STATUS_LINK_CHG);
3363 spin_lock(&tp->lock);
3364 tg3_setup_phy(tp, 0);
3365 spin_unlock(&tp->lock);
3369 /* run TX completion thread */
3370 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3372 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3373 netif_rx_complete(netdev);
3374 schedule_work(&tp->reset_task);
3379 /* run RX thread, within the bounds set by NAPI.
3380 * All RX "locking" is done by ensuring outside
3381 * code synchronizes with dev->poll()
3383 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3384 int orig_budget = *budget;
3387 if (orig_budget > netdev->quota)
3388 orig_budget = netdev->quota;
3390 work_done = tg3_rx(tp, orig_budget);
3392 *budget -= work_done;
3393 netdev->quota -= work_done;
3396 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3397 tp->last_tag = sblk->status_tag;
3400 sblk->status &= ~SD_STATUS_UPDATED;
3402 /* if no more work, tell net stack and NIC we're done */
3403 done = !tg3_has_work(tp);
3405 netif_rx_complete(netdev);
3406 tg3_restart_ints(tp);
3409 return (done ? 0 : 1);
3412 static void tg3_irq_quiesce(struct tg3 *tp)
3414 BUG_ON(tp->irq_sync);
3419 synchronize_irq(tp->pdev->irq);
3422 static inline int tg3_irq_sync(struct tg3 *tp)
3424 return tp->irq_sync;
3427 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3428 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3429 * with as well. Most of the time, this is not necessary except when
3430 * shutting down the device.
3432 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3435 tg3_irq_quiesce(tp);
3436 spin_lock_bh(&tp->lock);
3439 static inline void tg3_full_unlock(struct tg3 *tp)
3441 spin_unlock_bh(&tp->lock);
3444 /* One-shot MSI handler - Chip automatically disables interrupt
3445 * after sending MSI so driver doesn't have to do it.
3447 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3449 struct net_device *dev = dev_id;
3450 struct tg3 *tp = netdev_priv(dev);
3452 prefetch(tp->hw_status);
3453 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3455 if (likely(!tg3_irq_sync(tp)))
3456 netif_rx_schedule(dev); /* schedule NAPI poll */
3461 /* MSI ISR - No need to check for interrupt sharing and no need to
3462 * flush status block and interrupt mailbox. PCI ordering rules
3463 * guarantee that MSI will arrive after the status block.
3465 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3467 struct net_device *dev = dev_id;
3468 struct tg3 *tp = netdev_priv(dev);
3470 prefetch(tp->hw_status);
3471 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3473 * Writing any value to intr-mbox-0 clears PCI INTA# and
3474 * chip-internal interrupt pending events.
3475 * Writing non-zero to intr-mbox-0 additional tells the
3476 * NIC to stop sending us irqs, engaging "in-intr-handler"
3479 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3480 if (likely(!tg3_irq_sync(tp)))
3481 netif_rx_schedule(dev); /* schedule NAPI poll */
3483 return IRQ_RETVAL(1);
3486 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3488 struct net_device *dev = dev_id;
3489 struct tg3 *tp = netdev_priv(dev);
3490 struct tg3_hw_status *sblk = tp->hw_status;
3491 unsigned int handled = 1;
3493 /* In INTx mode, it is possible for the interrupt to arrive at
3494 * the CPU before the status block posted prior to the interrupt.
3495 * Reading the PCI State register will confirm whether the
3496 * interrupt is ours and will flush the status block.
3498 if ((sblk->status & SD_STATUS_UPDATED) ||
3499 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3501 * Writing any value to intr-mbox-0 clears PCI INTA# and
3502 * chip-internal interrupt pending events.
3503 * Writing non-zero to intr-mbox-0 additional tells the
3504 * NIC to stop sending us irqs, engaging "in-intr-handler"
3507 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3509 if (tg3_irq_sync(tp))
3511 sblk->status &= ~SD_STATUS_UPDATED;
3512 if (likely(tg3_has_work(tp))) {
3513 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3514 netif_rx_schedule(dev); /* schedule NAPI poll */
3516 /* No work, shared interrupt perhaps? re-enable
3517 * interrupts, and flush that PCI write
3519 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3522 } else { /* shared interrupt */
3526 return IRQ_RETVAL(handled);
3529 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3531 struct net_device *dev = dev_id;
3532 struct tg3 *tp = netdev_priv(dev);
3533 struct tg3_hw_status *sblk = tp->hw_status;
3534 unsigned int handled = 1;
3536 /* In INTx mode, it is possible for the interrupt to arrive at
3537 * the CPU before the status block posted prior to the interrupt.
3538 * Reading the PCI State register will confirm whether the
3539 * interrupt is ours and will flush the status block.
3541 if ((sblk->status_tag != tp->last_tag) ||
3542 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3544 * writing any value to intr-mbox-0 clears PCI INTA# and
3545 * chip-internal interrupt pending events.
3546 * writing non-zero to intr-mbox-0 additional tells the
3547 * NIC to stop sending us irqs, engaging "in-intr-handler"
3550 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3552 if (tg3_irq_sync(tp))
3554 if (netif_rx_schedule_prep(dev)) {
3555 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3556 /* Update last_tag to mark that this status has been
3557 * seen. Because interrupt may be shared, we may be
3558 * racing with tg3_poll(), so only update last_tag
3559 * if tg3_poll() is not scheduled.
3561 tp->last_tag = sblk->status_tag;
3562 __netif_rx_schedule(dev);
3564 } else { /* shared interrupt */
3568 return IRQ_RETVAL(handled);
3571 /* ISR for interrupt test */
3572 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3573 struct pt_regs *regs)
3575 struct net_device *dev = dev_id;
3576 struct tg3 *tp = netdev_priv(dev);
3577 struct tg3_hw_status *sblk = tp->hw_status;
3579 if ((sblk->status & SD_STATUS_UPDATED) ||
3580 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3581 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3583 return IRQ_RETVAL(1);
3585 return IRQ_RETVAL(0);
3588 static int tg3_init_hw(struct tg3 *, int);
3589 static int tg3_halt(struct tg3 *, int, int);
3591 /* Restart hardware after configuration changes, self-test, etc.
3592 * Invoked with tp->lock held.
3594 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3598 err = tg3_init_hw(tp, reset_phy);
3600 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3601 "aborting.\n", tp->dev->name);
3602 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3603 tg3_full_unlock(tp);
3604 del_timer_sync(&tp->timer);
3606 netif_poll_enable(tp->dev);
3608 tg3_full_lock(tp, 0);
3613 #ifdef CONFIG_NET_POLL_CONTROLLER
3614 static void tg3_poll_controller(struct net_device *dev)
3616 struct tg3 *tp = netdev_priv(dev);
3618 tg3_interrupt(tp->pdev->irq, dev, NULL);
3622 static void tg3_reset_task(void *_data)
3624 struct tg3 *tp = _data;
3625 unsigned int restart_timer;
3627 tg3_full_lock(tp, 0);
3628 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3630 if (!netif_running(tp->dev)) {
3631 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3632 tg3_full_unlock(tp);
3636 tg3_full_unlock(tp);
3640 tg3_full_lock(tp, 1);
3642 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3643 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3645 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3646 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3647 tp->write32_rx_mbox = tg3_write_flush_reg32;
3648 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3649 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3652 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3653 if (tg3_init_hw(tp, 1))
3656 tg3_netif_start(tp);
3659 mod_timer(&tp->timer, jiffies + 1);
3662 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3664 tg3_full_unlock(tp);
3667 static void tg3_tx_timeout(struct net_device *dev)
3669 struct tg3 *tp = netdev_priv(dev);
3671 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3674 schedule_work(&tp->reset_task);
3677 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3678 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3680 u32 base = (u32) mapping & 0xffffffff;
3682 return ((base > 0xffffdcc0) &&
3683 (base + len + 8 < base));
3686 /* Test for DMA addresses > 40-bit */
3687 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3690 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3691 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3692 return (((u64) mapping + len) > DMA_40BIT_MASK);
3699 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3701 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3702 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3703 u32 last_plus_one, u32 *start,
3704 u32 base_flags, u32 mss)
3706 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3707 dma_addr_t new_addr = 0;
3714 /* New SKB is guaranteed to be linear. */
3716 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3718 /* Make sure new skb does not cross any 4G boundaries.
3719 * Drop the packet if it does.
3721 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3723 dev_kfree_skb(new_skb);
3726 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3727 base_flags, 1 | (mss << 1));
3728 *start = NEXT_TX(entry);
3732 /* Now clean up the sw ring entries. */
3734 while (entry != last_plus_one) {
3738 len = skb_headlen(skb);
3740 len = skb_shinfo(skb)->frags[i-1].size;
3741 pci_unmap_single(tp->pdev,
3742 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3743 len, PCI_DMA_TODEVICE);
3745 tp->tx_buffers[entry].skb = new_skb;
3746 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3748 tp->tx_buffers[entry].skb = NULL;
3750 entry = NEXT_TX(entry);
3759 static void tg3_set_txd(struct tg3 *tp, int entry,
3760 dma_addr_t mapping, int len, u32 flags,
3763 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3764 int is_end = (mss_and_is_end & 0x1);
3765 u32 mss = (mss_and_is_end >> 1);
3769 flags |= TXD_FLAG_END;
3770 if (flags & TXD_FLAG_VLAN) {
3771 vlan_tag = flags >> 16;
3774 vlan_tag |= (mss << TXD_MSS_SHIFT);
3776 txd->addr_hi = ((u64) mapping >> 32);
3777 txd->addr_lo = ((u64) mapping & 0xffffffff);
3778 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3779 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3782 /* hard_start_xmit for devices that don't have any bugs and
3783 * support TG3_FLG2_HW_TSO_2 only.
/* NOTE(review): this extract elides some original source lines (gaps in the
 * embedded line numbers); the comments below describe only the visible code.
 *
 * Fast-path transmit: maps the skb's linear data and each page fragment
 * for DMA, fills TX descriptors via tg3_set_txd() (mss/VLAN/checksum flags
 * folded into base_flags), then rings the send-host producer mailbox.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring lacks room.
 */
3785 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3787 struct tg3 *tp = netdev_priv(dev);
3789 u32 len, entry, base_flags, mss;
3791 len = skb_headlen(skb);
3793 /* We are running in BH disabled context with netif_tx_lock
3794 * and TX reclaim runs via tp->poll inside of a software
3795 * interrupt. Furthermore, IRQ processing runs lockless so we have
3796 * no IRQ context deadlocks to worry about either. Rejoice!
/* Worst case: one descriptor for the head plus one per fragment. */
3798 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3799 if (!netif_queue_stopped(dev)) {
3800 netif_stop_queue(dev);
3802 /* This is a hard error, log it. */
3803 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3804 "queue awake!\n", dev->name);
3806 return NETDEV_TX_BUSY;
3809 entry = tp->tx_prod;
3811 #if TG3_TSO_SUPPORT != 0
/* TSO path: prime IP/TCP headers so the hardware can replicate them
 * per segment; header length is encoded into the high bits of mss. */
3813 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3814 (mss = skb_shinfo(skb)->gso_size) != 0) {
3815 int tcp_opt_len, ip_tcp_len;
3817 if (skb_header_cloned(skb) &&
3818 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3823 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3824 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3826 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3827 ip_tcp_len = (skb->nh.iph->ihl * 4) +
3828 sizeof(struct tcphdr);
/* Zero the IP checksum; the NIC recomputes it per segment. */
3830 skb->nh.iph->check = 0;
3831 skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3833 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3836 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3837 TXD_FLAG_CPU_POST_DMA);
3839 skb->h.th->check = 0;
3842 else if (skb->ip_summed == CHECKSUM_HW)
3843 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3846 if (skb->ip_summed == CHECKSUM_HW)
3847 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3849 #if TG3_VLAN_TAG_USED
3850 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3851 base_flags |= (TXD_FLAG_VLAN |
3852 (vlan_tx_tag_get(skb) << 16));
3855 /* Queue skb data, a.k.a. the main skb fragment. */
3856 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3858 tp->tx_buffers[entry].skb = skb;
3859 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
/* Low bit of the last arg marks "end of packet" when there are no frags. */
3861 tg3_set_txd(tp, entry, mapping, len, base_flags,
3862 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3864 entry = NEXT_TX(entry);
3866 /* Now loop through additional data fragments, and queue them. */
3867 if (skb_shinfo(skb)->nr_frags > 0) {
3868 unsigned int i, last;
3870 last = skb_shinfo(skb)->nr_frags - 1;
3871 for (i = 0; i <= last; i++) {
3872 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3875 mapping = pci_map_page(tp->pdev,
3878 len, PCI_DMA_TODEVICE);
/* Only the head descriptor owns the skb pointer for reclaim. */
3880 tp->tx_buffers[entry].skb = NULL;
3881 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3883 tg3_set_txd(tp, entry, mapping, len,
3884 base_flags, (i == last) | (mss << 1));
3886 entry = NEXT_TX(entry);
3890 /* Packets are ready, update Tx producer idx local and on card. */
3891 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3893 tp->tx_prod = entry;
/* Stop the queue when nearly full; re-check under tx_lock so a racing
 * TX-completion wakeup is not lost. */
3894 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
3895 spin_lock(&tp->tx_lock);
3896 netif_stop_queue(dev);
3897 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3898 netif_wake_queue(tp->dev);
3899 spin_unlock(&tp->tx_lock);
3905 dev->trans_start = jiffies;
3907 return NETDEV_TX_OK;
3910 #if TG3_TSO_SUPPORT != 0
3911 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3913 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3914 * TSO header is greater than 80 bytes.
/* Segments the over-long-header skb in software (TSO masked out of the
 * feature flags) and pushes each resulting segment through the normal
 * DMA-bug transmit path. Returns NETDEV_TX_BUSY without consuming the
 * skb when the ring cannot hold the worst-case descriptor count.
 * NOTE(review): some source lines are elided in this extract (e.g. the
 * per-segment loop body and the end label) — see the gaps in numbering.
 */
3916 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3918 struct sk_buff *segs, *nskb;
3920 /* Estimate the number of fragments in the worst case */
3921 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3922 netif_stop_queue(tp->dev);
3923 return NETDEV_TX_BUSY;
3926 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3927 if (unlikely(IS_ERR(segs)))
3928 goto tg3_tso_bug_end;
3934 tg3_start_xmit_dma_bug(nskb, tp->dev);
3940 return NETDEV_TX_OK;
3944 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3945 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
/* NOTE(review): this extract elides some original source lines (gaps in the
 * embedded line numbers); the comments below describe only the visible code.
 *
 * Like tg3_start_xmit() but additionally tests every DMA mapping against
 * the 4GB-boundary and 40-bit-address hardware bugs; when a mapping would
 * trip a bug, the already-queued descriptors are unwound and the packet
 * re-queued through tigon3_dma_hwbug_workaround(). Also diverts TSO
 * packets with headers > 80 bytes to tg3_tso_bug().
 */
3947 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3949 struct tg3 *tp = netdev_priv(dev);
3951 u32 len, entry, base_flags, mss;
3952 int would_hit_hwbug;
3954 len = skb_headlen(skb);
3956 /* We are running in BH disabled context with netif_tx_lock
3957 * and TX reclaim runs via tp->poll inside of a software
3958 * interrupt. Furthermore, IRQ processing runs lockless so we have
3959 * no IRQ context deadlocks to worry about either. Rejoice!
3961 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3962 if (!netif_queue_stopped(dev)) {
3963 netif_stop_queue(dev);
3965 /* This is a hard error, log it. */
3966 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3967 "queue awake!\n", dev->name);
3969 return NETDEV_TX_BUSY;
3972 entry = tp->tx_prod;
3974 if (skb->ip_summed == CHECKSUM_HW)
3975 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3976 #if TG3_TSO_SUPPORT != 0
3978 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3979 (mss = skb_shinfo(skb)->gso_size) != 0) {
3980 int tcp_opt_len, ip_tcp_len, hdr_len;
3982 if (skb_header_cloned(skb) &&
3983 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3988 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3989 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
/* Headers longer than 80 bytes can trigger a TSO chip bug on
 * HW_TSO_1 parts — fall back to software GSO. */
3991 hdr_len = ip_tcp_len + tcp_opt_len;
3992 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
3993 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
3994 return (tg3_tso_bug(tp, skb));
3996 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3997 TXD_FLAG_CPU_POST_DMA);
3999 skb->nh.iph->check = 0;
4000 skb->nh.iph->tot_len = htons(mss + hdr_len);
4001 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
/* Hardware TSO computes the TCP checksum itself. */
4002 skb->h.th->check = 0;
4003 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4007 ~csum_tcpudp_magic(skb->nh.iph->saddr,
/* Encode IP/TCP option lengths where this chip family expects them:
 * in mss bits for HW TSO / 5705, in base_flags otherwise. */
4012 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4013 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4014 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4017 tsflags = ((skb->nh.iph->ihl - 5) +
4018 (tcp_opt_len >> 2));
4019 mss |= (tsflags << 11);
4022 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4025 tsflags = ((skb->nh.iph->ihl - 5) +
4026 (tcp_opt_len >> 2));
4027 base_flags |= tsflags << 12;
4034 #if TG3_VLAN_TAG_USED
4035 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4036 base_flags |= (TXD_FLAG_VLAN |
4037 (vlan_tx_tag_get(skb) << 16));
4040 /* Queue skb data, a.k.a. the main skb fragment. */
4041 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4043 tp->tx_buffers[entry].skb = skb;
4044 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4046 would_hit_hwbug = 0;
/* A buffer crossing a 4GB DMA boundary trips the hardware bug. */
4048 if (tg3_4g_overflow_test(mapping, len))
4049 would_hit_hwbug = 1;
4051 tg3_set_txd(tp, entry, mapping, len, base_flags,
4052 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4054 entry = NEXT_TX(entry);
4056 /* Now loop through additional data fragments, and queue them. */
4057 if (skb_shinfo(skb)->nr_frags > 0) {
4058 unsigned int i, last;
4060 last = skb_shinfo(skb)->nr_frags - 1;
4061 for (i = 0; i <= last; i++) {
4062 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4065 mapping = pci_map_page(tp->pdev,
4068 len, PCI_DMA_TODEVICE);
4070 tp->tx_buffers[entry].skb = NULL;
4071 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4073 if (tg3_4g_overflow_test(mapping, len))
4074 would_hit_hwbug = 1;
4076 if (tg3_40bit_overflow_test(tp, mapping, len))
4077 would_hit_hwbug = 1;
4079 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4080 tg3_set_txd(tp, entry, mapping, len,
4081 base_flags, (i == last)|(mss << 1));
4083 tg3_set_txd(tp, entry, mapping, len,
4084 base_flags, (i == last));
4086 entry = NEXT_TX(entry);
4090 if (would_hit_hwbug) {
4091 u32 last_plus_one = entry;
/* Rewind to the head descriptor of this packet so the workaround
 * can re-map everything into bounce buffers. */
4094 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4095 start &= (TG3_TX_RING_SIZE - 1);
4097 /* If the workaround fails due to memory/mapping
4098 * failure, silently drop this packet.
4100 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4101 &start, base_flags, mss))
4107 /* Packets are ready, update Tx producer idx local and on card. */
4108 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4110 tp->tx_prod = entry;
4111 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
4112 spin_lock(&tp->tx_lock);
4113 netif_stop_queue(dev);
4114 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
4115 netif_wake_queue(tp->dev);
4116 spin_unlock(&tp->tx_lock);
4122 dev->trans_start = jiffies;
4124 return NETDEV_TX_OK;
/* Records the MTU on the netdev and adjusts driver flags accordingly:
 * a jumbo MTU enables the jumbo RX ring and (on 5780-class chips, which
 * cannot do TSO with jumbo frames) disables TSO capability; a standard
 * MTU restores TSO capability and disables the jumbo ring.
 * NOTE(review): some lines (e.g. the dev->mtu assignment) are elided in
 * this extract.
 */
4127 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4132 if (new_mtu > ETH_DATA_LEN) {
4133 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4134 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4135 ethtool_op_set_tso(dev, 0);
4138 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4140 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4141 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4142 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
/* ndo change_mtu handler. Validates the requested MTU, and if the device
 * is running, halts the chip, applies the new MTU via tg3_set_mtu(), and
 * restarts the hardware under the full lock. If the device is down, only
 * the soft state is updated — the hardware picks it up on next open.
 */
4146 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4148 struct tg3 *tp = netdev_priv(dev);
4151 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4154 if (!netif_running(dev)) {
4155 /* We'll just catch it later when the
4158 tg3_set_mtu(dev, tp, new_mtu);
4164 tg3_full_lock(tp, 1);
4166 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4168 tg3_set_mtu(dev, tp, new_mtu);
4170 err = tg3_restart_hw(tp, 0);
4173 tg3_netif_start(tp);
4175 tg3_full_unlock(tp);
4180 /* Free up pending packets in all rx/tx rings.
4182 * The chip has been shut down and the driver detached from
4183 * the networking, so no interrupts or new tx packets will
4184 * end up in the driver. tp->{tx,}lock is not held and we are not
4185 * in an interrupt context and thus may sleep.
/* Walks the standard RX ring, jumbo RX ring, and TX ring, DMA-unmapping
 * and freeing every pending skb. */
4187 static void tg3_free_rings(struct tg3 *tp)
4189 struct ring_info *rxp;
4192 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4193 rxp = &tp->rx_std_buffers[i];
4195 if (rxp->skb == NULL)
4197 pci_unmap_single(tp->pdev,
4198 pci_unmap_addr(rxp, mapping),
4199 tp->rx_pkt_buf_sz - tp->rx_offset,
4200 PCI_DMA_FROMDEVICE);
4201 dev_kfree_skb_any(rxp->skb);
4205 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4206 rxp = &tp->rx_jumbo_buffers[i];
4208 if (rxp->skb == NULL)
4210 pci_unmap_single(tp->pdev,
4211 pci_unmap_addr(rxp, mapping),
4212 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4213 PCI_DMA_FROMDEVICE);
4214 dev_kfree_skb_any(rxp->skb);
/* A TX skb occupies one head descriptor plus one per fragment;
 * unmap the head then each fragment's page mapping. */
4218 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4219 struct tx_ring_info *txp;
4220 struct sk_buff *skb;
4223 txp = &tp->tx_buffers[i];
4231 pci_unmap_single(tp->pdev,
4232 pci_unmap_addr(txp, mapping),
4239 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4240 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4241 pci_unmap_page(tp->pdev,
4242 pci_unmap_addr(txp, mapping),
4243 skb_shinfo(skb)->frags[j].size,
4248 dev_kfree_skb_any(skb);
4252 /* Initialize tx/rx rings for packet processing.
4254 * The chip has been shut down and the driver detached from
4255 * the networking, so no interrupts or new tx packets will
4256 * end up in the driver. tp->{tx,}lock are held and thus
/* Frees any stale skbs, zeroes all descriptor rings, writes the
 * ring-invariant descriptor fields, then pre-posts fresh RX skbs for the
 * standard ring and (when enabled) the jumbo ring. Shrinks rx_pending /
 * rx_jumbo_pending with a warning when allocation falls short. */
4259 static int tg3_init_rings(struct tg3 *tp)
4263 /* Free up all the SKBs. */
4266 /* Zero out all descriptors. */
4267 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4268 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4269 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4270 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
/* 5780-class chips use jumbo-sized buffers in the standard ring
 * instead of a separate jumbo ring. */
4272 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4273 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4274 (tp->dev->mtu > ETH_DATA_LEN))
4275 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4277 /* Initialize invariants of the rings, we only set this
4278 * stuff once. This works because the card does not
4279 * write into the rx buffer posting rings.
4281 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4282 struct tg3_rx_buffer_desc *rxd;
4284 rxd = &tp->rx_std[i];
4285 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4287 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4288 rxd->opaque = (RXD_OPAQUE_RING_STD |
4289 (i << RXD_OPAQUE_INDEX_SHIFT));
4292 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4293 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4294 struct tg3_rx_buffer_desc *rxd;
4296 rxd = &tp->rx_jumbo[i];
4297 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4299 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4301 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4302 (i << RXD_OPAQUE_INDEX_SHIFT));
4306 /* Now allocate fresh SKBs for each rx ring. */
4307 for (i = 0; i < tp->rx_pending; i++) {
4308 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4309 printk(KERN_WARNING PFX
4310 "%s: Using a smaller RX standard ring, "
4311 "only %d out of %d buffers were allocated "
4313 tp->dev->name, i, tp->rx_pending);
4321 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4322 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4323 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4325 printk(KERN_WARNING PFX
4326 "%s: Using a smaller RX jumbo ring, "
4327 "only %d out of %d buffers were "
4328 "allocated successfully.\n",
4329 tp->dev->name, i, tp->rx_jumbo_pending);
4334 tp->rx_jumbo_pending = i;
4343 * Must not be invoked with interrupt sources disabled and
4344 * the hardware shutdown down.
/* Releases every DMA-coherent ring/status/stats allocation made by
 * tg3_alloc_consistent(), plus the kmalloc'd ring_info bookkeeping
 * array. Pointers are NULLed so a second call is harmless. */
4346 static void tg3_free_consistent(struct tg3 *tp)
4348 kfree(tp->rx_std_buffers);
4349 tp->rx_std_buffers = NULL;
4351 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4352 tp->rx_std, tp->rx_std_mapping);
4356 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4357 tp->rx_jumbo, tp->rx_jumbo_mapping);
4358 tp->rx_jumbo = NULL;
4361 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4362 tp->rx_rcb, tp->rx_rcb_mapping);
4366 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4367 tp->tx_ring, tp->tx_desc_mapping);
4370 if (tp->hw_status) {
4371 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4372 tp->hw_status, tp->status_mapping);
4373 tp->hw_status = NULL;
4376 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4377 tp->hw_stats, tp->stats_mapping);
4378 tp->hw_stats = NULL;
4383 * Must not be invoked with interrupt sources disabled and
4384 * the hardware shutdown down. Can sleep.
/* Allocates one kmalloc block carved into the std/jumbo/tx ring_info
 * arrays, then the DMA-coherent descriptor rings, status block, and
 * stats block. On any failure, unwinds via tg3_free_consistent(). */
4386 static int tg3_alloc_consistent(struct tg3 *tp)
4388 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4390 TG3_RX_JUMBO_RING_SIZE)) +
4391 (sizeof(struct tx_ring_info) *
4394 if (!tp->rx_std_buffers)
4397 memset(tp->rx_std_buffers, 0,
4398 (sizeof(struct ring_info) *
4400 TG3_RX_JUMBO_RING_SIZE)) +
4401 (sizeof(struct tx_ring_info) *
/* Carve the jumbo and tx bookkeeping arrays out of the same block. */
4404 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4405 tp->tx_buffers = (struct tx_ring_info *)
4406 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4408 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4409 &tp->rx_std_mapping);
4413 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4414 &tp->rx_jumbo_mapping);
4419 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4420 &tp->rx_rcb_mapping);
4424 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4425 &tp->tx_desc_mapping);
4429 tp->hw_status = pci_alloc_consistent(tp->pdev,
4431 &tp->status_mapping);
4435 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4436 sizeof(struct tg3_hw_stats),
4437 &tp->stats_mapping);
4441 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4442 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4447 tg3_free_consistent(tp);
4451 #define MAX_WAIT_CNT 1000
4453 /* To stop a block, clear the enable bit and poll till it
4454 * clears. tp->lock is held.
/* Returns 0 on success; on timeout logs (unless 'silent') and returns an
 * error. On 5705-plus chips some blocks cannot be disabled, so those are
 * reported as success without polling. */
4456 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4461 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4468 /* We can't enable/disable these bits of the
4469 * 5705/5750, just say success.
4482 for (i = 0; i < MAX_WAIT_CNT; i++) {
4485 if ((val & enable_bit) == 0)
4489 if (i == MAX_WAIT_CNT && !silent) {
4490 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4491 "ofs=%lx enable_bit=%x\n",
4499 /* tp->lock is held. */
/* Quiesces the chip in DMA-safe order: disables interrupts and the RX MAC,
 * stops the receive-side blocks, then the send-side and DMA blocks, waits
 * for the TX MAC to drain, stops host coalescing / WDMA / buffer manager /
 * memory arbiter, resets the FTQ, and finally clears the host status and
 * stats blocks. Accumulates any tg3_stop_block() failures into err. */
4500 static int tg3_abort_hw(struct tg3 *tp, int silent)
4504 tg3_disable_ints(tp);
4506 tp->rx_mode &= ~RX_MODE_ENABLE;
4507 tw32_f(MAC_RX_MODE, tp->rx_mode);
4510 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4511 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4512 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4513 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4514 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4515 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4517 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4518 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4519 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4520 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4521 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4522 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4523 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4525 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4526 tw32_f(MAC_MODE, tp->mac_mode);
4529 tp->tx_mode &= ~TX_MODE_ENABLE;
4530 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Poll for the TX MAC to acknowledge the disable. */
4532 for (i = 0; i < MAX_WAIT_CNT; i++) {
4534 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4537 if (i >= MAX_WAIT_CNT) {
4538 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4539 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4540 tp->dev->name, tr32(MAC_TX_MODE));
4544 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4545 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4546 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4548 tw32(FTQ_RESET, 0xffffffff);
4549 tw32(FTQ_RESET, 0x00000000);
4551 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4552 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4555 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4557 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4562 /* tp->lock is held. */
/* Acquires the NVRAM software arbitration lock (recursive via
 * nvram_lock_cnt); only the outermost acquire touches the hardware.
 * Times out after ~8000 polls of SWARB_GNT1 and backs out the request. */
4563 static int tg3_nvram_lock(struct tg3 *tp)
4565 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4568 if (tp->nvram_lock_cnt == 0) {
4569 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4570 for (i = 0; i < 8000; i++) {
4571 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4576 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4580 tp->nvram_lock_cnt++;
4585 /* tp->lock is held. */
/* Drops one level of the recursive NVRAM arbitration lock; the hardware
 * grant is released only when the count reaches zero. */
4586 static void tg3_nvram_unlock(struct tg3 *tp)
4588 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4589 if (tp->nvram_lock_cnt > 0)
4590 tp->nvram_lock_cnt--;
4591 if (tp->nvram_lock_cnt == 0)
4592 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4596 /* tp->lock is held. */
/* Sets the NVRAM access-enable bit on 5750-plus chips whose NVRAM is
 * not firmware-protected; a no-op otherwise. */
4597 static void tg3_enable_nvram_access(struct tg3 *tp)
4599 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4600 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4601 u32 nvaccess = tr32(NVRAM_ACCESS);
4603 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4607 /* tp->lock is held. */
/* Counterpart of tg3_enable_nvram_access(): clears the access-enable bit
 * under the same chip/protection conditions. */
4608 static void tg3_disable_nvram_access(struct tg3 *tp)
4610 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4611 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4612 u32 nvaccess = tr32(NVRAM_ACCESS);
4614 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4618 /* tp->lock is held. */
/* Before a chip reset: posts the firmware-mailbox magic, then, if the
 * new ASF handshake is in use, records the reset kind (init/shutdown/
 * suspend state) in the driver-state mailbox for the ASF firmware. */
4619 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4621 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4622 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4624 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4626 case RESET_KIND_INIT:
4627 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4631 case RESET_KIND_SHUTDOWN:
4632 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4636 case RESET_KIND_SUSPEND:
4637 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4647 /* tp->lock is held. */
/* After a chip reset: signals start/unload completion to the ASF firmware
 * when the new handshake is in use. */
4648 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4650 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4652 case RESET_KIND_INIT:
4653 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4654 DRV_STATE_START_DONE);
4657 case RESET_KIND_SHUTDOWN:
4658 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4659 DRV_STATE_UNLOAD_DONE);
4668 /* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF signalling: records the reset kind in
 * the driver-state mailbox whenever ASF is enabled. */
4669 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4671 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4673 case RESET_KIND_INIT:
4674 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4678 case RESET_KIND_SHUTDOWN:
4679 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4683 case RESET_KIND_SUSPEND:
4684 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4694 static void tg3_stop_fw(struct tg3 *);
4696 /* tp->lock is held. */
/* NOTE(review): this extract elides some original source lines (gaps in the
 * embedded line numbers); the comments below describe only the visible code.
 *
 * Core-clock chip reset. Sequence: zero the NVRAM lock count (reset clears
 * the hardware arbitration), temporarily bypass the 5701 write-flush
 * workaround, issue GRC_MISC_CFG_CORECLK_RESET, restore PCI config space
 * and MSI state, re-enable the memory arbiter, wait for the bootcode
 * firmware to signal completion, and re-probe ASF configuration from
 * NIC SRAM. Returns an error only via elided paths (visible flow sets up
 * state and falls through).
 */
4697 static int tg3_chip_reset(struct tg3 *tp)
4700 void (*write_op)(struct tg3 *, u32, u32);
4705 /* No matching tg3_nvram_unlock() after this because
4706 * chip reset below will undo the nvram lock.
4708 tp->nvram_lock_cnt = 0;
4710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4711 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4713 tw32(GRC_FASTBOOT_PC, 0);
4716 * We must avoid the readl() that normally takes place.
4717 * It locks machines, causes machine checks, and other
4718 * fun things. So, temporarily disable the 5701
4719 * hardware workaround, while we do the reset.
4721 write_op = tp->write32;
4722 if (write_op == tg3_write_flush_reg32)
4723 tp->write32 = tg3_write32;
4726 val = GRC_MISC_CFG_CORECLK_RESET;
4728 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4729 if (tr32(0x7e2c) == 0x60) {
4732 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4733 tw32(GRC_MISC_CFG, (1 << 29));
4738 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4739 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4740 tw32(GRC_MISC_CFG, val);
4742 /* restore 5701 hardware bug workaround write method */
4743 tp->write32 = write_op;
4745 /* Unfortunately, we have to delay before the PCI read back.
4746 * Some 575X chips even will not respond to a PCI cfg access
4747 * when the reset command is given to the chip.
4749 * How do these hardware designers expect things to work
4750 * properly if the PCI write is posted for a long period
4751 * of time? It is always necessary to have some method by
4752 * which a register read back can occur to push the write
4753 * out which does the reset.
4755 * For most tg3 variants the trick below was working.
4760 /* Flush PCI posted writes. The normal MMIO registers
4761 * are inaccessible at this time so this is the only
4762 * way to make this reliably (actually, this is no longer
4763 * the case, see above). I tried to use indirect
4764 * register read/write but this upset some 5701 variants.
4766 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4770 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4771 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4775 /* Wait for link training to complete. */
4776 for (i = 0; i < 5000; i++)
4779 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4780 pci_write_config_dword(tp->pdev, 0xc4,
4781 cfg_val | (1 << 15));
4783 /* Set PCIE max payload size and clear error status. */
4784 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4787 /* Re-enable indirect register accesses. */
4788 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4789 tp->misc_host_ctrl);
4791 /* Set MAX PCI retry to zero. */
4792 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4793 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4794 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4795 val |= PCISTATE_RETRY_SAME_DMA;
4796 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4798 pci_restore_state(tp->pdev);
4800 /* Make sure PCI-X relaxed ordering bit is clear. */
4801 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4802 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4803 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4805 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4808 /* Chip reset on 5780 will reset MSI enable bit,
4809 * so need to restore it.
4811 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4814 pci_read_config_word(tp->pdev,
4815 tp->msi_cap + PCI_MSI_FLAGS,
4817 pci_write_config_word(tp->pdev,
4818 tp->msi_cap + PCI_MSI_FLAGS,
4819 ctrl | PCI_MSI_FLAGS_ENABLE);
4820 val = tr32(MSGINT_MODE);
4821 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4824 val = tr32(MEMARB_MODE);
4825 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4828 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4830 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4832 tw32(0x5000, 0x400);
4835 tw32(GRC_MODE, tp->grc_mode);
4837 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4838 u32 val = tr32(0xc4);
4840 tw32(0xc4, val | (1 << 15));
4843 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4844 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4845 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4846 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4847 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4848 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4851 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4852 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4853 tw32_f(MAC_MODE, tp->mac_mode);
4854 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4855 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4856 tw32_f(MAC_MODE, tp->mac_mode);
4858 tw32_f(MAC_MODE, 0);
4861 /* Wait for firmware initialization to complete. */
4862 for (i = 0; i < 100000; i++) {
4863 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4864 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4869 /* Chip might not be fitted with firmare. Some Sun onboard
4870 * parts are configured like that. So don't signal the timeout
4871 * of the above loop as an error, but do report the lack of
4872 * running firmware once.
4875 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4876 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4878 printk(KERN_INFO PFX "%s: No firmware running.\n",
4882 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4883 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4884 u32 val = tr32(0x7c00);
4886 tw32(0x7c00, val | (1 << 25));
4889 /* Reprobe ASF enable state. */
4890 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4891 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4892 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4893 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4896 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4897 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4898 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4899 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4900 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4907 /* tp->lock is held. */
/* When ASF is enabled, sends FWCMD_NICDRV_PAUSE_FW to the firmware via
 * the command mailbox and a RX-CPU event, then polls up to 100 times for
 * the RX CPU to acknowledge (event bit 14 clearing). */
4908 static void tg3_stop_fw(struct tg3 *tp)
4910 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4914 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4915 val = tr32(GRC_RX_CPU_EVENT);
4917 tw32(GRC_RX_CPU_EVENT, val);
4919 /* Wait for RX cpu to ACK the event. */
4920 for (i = 0; i < 100; i++) {
4921 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4928 /* tp->lock is held. */
/* Full shutdown/reset sequence: write the pre-reset firmware signature,
 * quiesce the hardware (tg3_abort_hw), reset the chip, then write the
 * legacy and post-reset signatures for the given reset kind. Returns
 * the tg3_chip_reset() result (visible flow). */
4929 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4935 tg3_write_sig_pre_reset(tp, kind);
4937 tg3_abort_hw(tp, silent);
4938 err = tg3_chip_reset(tp);
4940 tg3_write_sig_legacy(tp, kind);
4941 tg3_write_sig_post_reset(tp, kind);
/* On-chip MIPS firmware image layout: load addresses and section lengths
 * for the 5701-class helper firmware blob that follows (text, rodata,
 * data, sbss, bss). Note the existing typo "RELASE" in the minor-version
 * macro name — left as-is since renaming would break any references. */
4949 #define TG3_FW_RELEASE_MAJOR 0x0
4950 #define TG3_FW_RELASE_MINOR 0x0
4951 #define TG3_FW_RELEASE_FIX 0x0
4952 #define TG3_FW_START_ADDR 0x08000000
4953 #define TG3_FW_TEXT_ADDR 0x08000000
4954 #define TG3_FW_TEXT_LEN 0x9c0
4955 #define TG3_FW_RODATA_ADDR 0x080009c0
4956 #define TG3_FW_RODATA_LEN 0x60
4957 #define TG3_FW_DATA_ADDR 0x08000a40
4958 #define TG3_FW_DATA_LEN 0x20
4959 #define TG3_FW_SBSS_ADDR 0x08000a60
4960 #define TG3_FW_SBSS_LEN 0xc
4961 #define TG3_FW_BSS_ADDR 0x08000a70
4962 #define TG3_FW_BSS_LEN 0x10
/* Firmware .text section: raw MIPS machine code, loaded to
 * TG3_FW_TEXT_ADDR. Do not edit by hand. */
4964 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4965 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4966 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4967 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4968 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4969 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4970 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4971 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4972 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4973 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4974 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4975 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4976 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4977 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4978 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4979 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4980 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4981 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4982 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4983 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4984 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4985 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4986 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4987 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4988 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4989 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4991 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4992 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4993 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4994 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4995 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4996 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4997 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4998 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4999 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5000 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5001 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5002 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5003 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5004 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5005 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5006 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5007 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5008 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5009 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5010 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5011 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5012 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5013 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5014 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5015 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5016 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5017 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5018 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5019 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5020 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5021 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5022 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5023 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5024 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5025 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5026 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5027 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5028 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5029 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5030 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5031 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5032 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5033 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5034 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5035 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5036 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5037 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5038 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5039 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5040 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5041 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5042 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5043 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5044 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5045 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5046 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5047 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5048 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5049 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5050 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5051 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5052 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5053 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5054 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5055 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5058 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5059 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5060 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5061 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5062 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5066 #if 0 /* All zeros, don't eat up space with it. */
5067 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5068 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5069 0x00000000, 0x00000000, 0x00000000, 0x00000000
5073 #define RX_CPU_SCRATCH_BASE 0x30000
5074 #define RX_CPU_SCRATCH_SIZE 0x04000
5075 #define TX_CPU_SCRATCH_BASE 0x34000
5076 #define TX_CPU_SCRATCH_SIZE 0x04000
/* tg3_halt_cpu() -- place the on-chip RX or TX MIPS CPU into the HALT
 * state.  @offset selects the CPU register block (RX_CPU_BASE or
 * TX_CPU_BASE).  tp->lock is held by the caller.
 *
 * NOTE(review): the loop-exit bookkeeping and return statements fall on
 * elided lines of this excerpt; presumably returns 0 on success and an
 * error code on timeout -- confirm against the full source.
 */
5079 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
/* 5705-and-later chips have no separate TX CPU, so a request to halt
 * it is a driver bug, not a runtime condition. */
5083 	BUG_ON(offset == TX_CPU_BASE &&
5084 	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5086 	if (offset == RX_CPU_BASE) {
/* Keep re-issuing HALT (up to 10000 tries) until CPU_MODE reads
 * back with the HALT bit set. */
5087 		for (i = 0; i < 10000; i++) {
5088 			tw32(offset + CPU_STATE, 0xffffffff);
5089 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5090 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Issue one final HALT with a flushed write (tw32_f) so the request
 * is guaranteed to have reached the chip before we proceed. */
5094 		tw32(offset + CPU_STATE, 0xffffffff);
5095 		tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
/* Non-RX (TX) CPU path: same bounded retry loop as above. */
5098 		for (i = 0; i < 10000; i++) {
5099 			tw32(offset + CPU_STATE, 0xffffffff);
5100 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5101 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Retry budget exhausted: report which CPU failed to halt. */
5107 		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5110 		       (offset == RX_CPU_BASE ? "RX" : "TX"));
/* Clear firmware's nvram arbitration.  (The halted bootcode may have
 * been holding an NVRAM arbitration request.) */
5115 	if (tp->tg3_flags & TG3_FLAG_NVRAM)
5116 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5121 	unsigned int text_base;		/* .text load address in the CPU's address space */
5122 	unsigned int text_len;		/* .text length in bytes (copied one u32 at a time) */
5124 	unsigned int rodata_base;	/* .rodata load address */
5125 	unsigned int rodata_len;	/* .rodata length in bytes */
5127 	unsigned int data_base;		/* .data load address */
5128 	unsigned int data_len;		/* .data length in bytes */
/* tg3_load_firmware_cpu() -- copy a firmware image described by @info
 * into the scratch memory of the CPU at @cpu_base, leaving the CPU
 * halted.  tp->lock is held by the caller.
 *
 * @cpu_base:          CPU register block (RX_CPU_BASE or TX_CPU_BASE)
 * @cpu_scratch_base:  start of that CPU's scratch RAM
 * @cpu_scratch_size:  scratch RAM size in bytes (zeroed before load)
 * @info:              section addresses/lengths and data pointers; a
 *                     NULL section data pointer means "fill with zeros"
 *
 * NOTE(review): the early-return error paths and the final return fall
 * on elided lines of this excerpt -- confirm against the full source.
 */
5133 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5134 				 int cpu_scratch_size, struct fw_info *info)
5136 	int err, lock_err, i;
5137 	void (*write_op)(struct tg3 *, u32, u32);
/* Loading TX-CPU firmware on a 5705-class chip is a caller bug:
 * those chips have no separate TX CPU. */
5139 	if (cpu_base == TX_CPU_BASE &&
5140 	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5141 		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5142 		       "TX cpu firmware on %s which is 5705.\n",
/* Pick the register-write primitive appropriate for the chip family. */
5147 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5148 		write_op = tg3_write_mem;
5150 		write_op = tg3_write_indirect_reg32;
5152 	/* It is possible that bootcode is still loading at this point.
5153 	 * Get the nvram lock first before halting the cpu.
5155 	lock_err = tg3_nvram_lock(tp);
5156 	err = tg3_halt_cpu(tp, cpu_base);
5158 		tg3_nvram_unlock(tp);
/* Zero the whole scratch area, then keep the CPU halted while the
 * image sections are written in. */
5162 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5163 		write_op(tp, cpu_scratch_base + i, 0);
5164 	tw32(cpu_base + CPU_STATE, 0xffffffff);
5165 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
/* Copy each section word-by-word.  Only the low 16 bits of the
 * section's load address are used as the offset into scratch RAM. */
5166 	for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5167 		write_op(tp, (cpu_scratch_base +
5168 			      (info->text_base & 0xffff) +
5171 			       info->text_data[i] : 0));
5172 	for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5173 		write_op(tp, (cpu_scratch_base +
5174 			      (info->rodata_base & 0xffff) +
5176 			      (info->rodata_data ?
5177 			       info->rodata_data[i] : 0));
5178 	for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5179 		write_op(tp, (cpu_scratch_base +
5180 			      (info->data_base & 0xffff) +
5183 			       info->data_data[i] : 0));
/* tg3_load_5701_a0_firmware_fix() -- download the 5701 A0 workaround
 * firmware (tg3FwText/tg3FwRodata; .data is all zeros, hence NULL) into
 * both the RX and TX CPU scratch areas, then start only the RX CPU at
 * the firmware entry point.  tp->lock is held by the caller.
 *
 * NOTE(review): the success return at the end of the function falls on
 * an elided line of this excerpt -- presumably returns 0 on success and
 * a negative errno on load/start failure; confirm against full source.
 */
5192 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5194 	struct fw_info info;
/* Describe the firmware image sections for the loader. */
5197 	info.text_base = TG3_FW_TEXT_ADDR;
5198 	info.text_len = TG3_FW_TEXT_LEN;
5199 	info.text_data = &tg3FwText[0];
5200 	info.rodata_base = TG3_FW_RODATA_ADDR;
5201 	info.rodata_len = TG3_FW_RODATA_LEN;
5202 	info.rodata_data = &tg3FwRodata[0];
5203 	info.data_base = TG3_FW_DATA_ADDR;
5204 	info.data_len = TG3_FW_DATA_LEN;
5205 	info.data_data = NULL;		/* .data section is all zeros */
/* Same image goes into both CPUs' scratch RAM. */
5207 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5208 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5213 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5214 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5219 	/* Now startup only the RX cpu. */
5220 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5221 	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
/* Verify the PC latched; re-halt and re-write it up to 5 times. */
5223 	for (i = 0; i < 5; i++) {
5224 		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5226 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5227 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5228 		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
/* PC never took the entry address: report observed vs. expected. */
5232 		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5233 		       "to set RX CPU PC, is %08x should be %08x\n",
5234 		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Clear HALT (mode = 0) to let the RX CPU run, with a flushed write. */
5238 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5239 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5244 #if TG3_TSO_SUPPORT != 0
5246 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
5247 #define TG3_TSO_FW_RELASE_MINOR 0x6
5248 #define TG3_TSO_FW_RELEASE_FIX 0x0
5249 #define TG3_TSO_FW_START_ADDR 0x08000000
5250 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
5251 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
5252 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5253 #define TG3_TSO_FW_RODATA_LEN 0x60
5254 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
5255 #define TG3_TSO_FW_DATA_LEN 0x30
5256 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5257 #define TG3_TSO_FW_SBSS_LEN 0x2c
5258 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
5259 #define TG3_TSO_FW_BSS_LEN 0x894
5261 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5262 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5263 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5264 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5265 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5266 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5267 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5268 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5269 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5270 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5271 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5272 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5273 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5274 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5275 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5276 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5277 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5278 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5279 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5280 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5281 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5282 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5283 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5284 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5285 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5286 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5287 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5288 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5289 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5290 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5291 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5292 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5293 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5294 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5295 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5296 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5297 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5298 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5299 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5300 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5301 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5302 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5303 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5304 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5305 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5306 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5307 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5308 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5309 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5310 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5311 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5312 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5313 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5314 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5315 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5316 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5317 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5318 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5319 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5320 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5321 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5322 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5323 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5324 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5325 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5326 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5327 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5328 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5329 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5330 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5331 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5332 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5333 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5334 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5335 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5336 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5337 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5338 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5339 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5340 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5341 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5342 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5343 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5344 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5345 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5346 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5347 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5348 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5349 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5350 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5351 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5352 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5353 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5354 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5355 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5356 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5357 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5358 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5359 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5360 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5361 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5362 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5363 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5364 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5365 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5366 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5367 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5368 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5369 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5370 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5371 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5372 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5373 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5374 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5375 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5376 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5377 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5378 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5379 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5380 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5381 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5382 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5383 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5384 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5385 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5386 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5387 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5388 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5389 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5390 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5391 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5392 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5393 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5394 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5395 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5396 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5397 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5398 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5399 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5400 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5401 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5402 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5403 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5404 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5405 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5406 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5407 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5408 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5409 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5410 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5411 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5412 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5413 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5414 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5415 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5416 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5417 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5418 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5419 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5420 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5421 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5422 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5423 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5424 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5425 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5426 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5427 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5428 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5429 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5430 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5431 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5432 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5433 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5434 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5435 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5436 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5437 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5438 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5439 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5440 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5441 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5442 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5443 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5444 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5445 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5446 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5447 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5448 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5449 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5450 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5451 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5452 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5453 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5454 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5455 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5456 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5457 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5458 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5459 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5460 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5461 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5462 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5463 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5464 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5465 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5466 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5467 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5468 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5469 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5470 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5471 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5472 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5473 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5474 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5475 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5476 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5477 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5478 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5479 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5480 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5481 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5482 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5483 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5484 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5485 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5486 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5487 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5488 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5489 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5490 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5491 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5492 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5493 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5494 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5495 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5496 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5497 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5498 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5499 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5500 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5501 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5502 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5503 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5504 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5505 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5506 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5507 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5508 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5509 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5510 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5511 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5512 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5513 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5514 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5515 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5516 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5517 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5518 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5519 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5520 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5521 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5522 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5523 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5524 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5525 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5526 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5527 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5528 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5529 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5530 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5531 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5532 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5533 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5534 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5535 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5536 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5537 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5538 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5539 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5540 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5541 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5542 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5543 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5544 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5545 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5548 static u32 tg3TsoFwRodata[] = {
5549 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5550 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5551 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5552 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5556 static u32 tg3TsoFwData[] = {
5557 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5558 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5562 /* 5705 needs a special version of the TSO firmware. */
5563 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5564 #define TG3_TSO5_FW_RELASE_MINOR 0x2
5565 #define TG3_TSO5_FW_RELEASE_FIX 0x0
5566 #define TG3_TSO5_FW_START_ADDR 0x00010000
5567 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5568 #define TG3_TSO5_FW_TEXT_LEN 0xe90
5569 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5570 #define TG3_TSO5_FW_RODATA_LEN 0x50
5571 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5572 #define TG3_TSO5_FW_DATA_LEN 0x20
5573 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5574 #define TG3_TSO5_FW_SBSS_LEN 0x28
5575 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5576 #define TG3_TSO5_FW_BSS_LEN 0x88
5578 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5579 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5580 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5581 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5582 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5583 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5584 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5585 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5586 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5587 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5588 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5589 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5590 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5591 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5592 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5593 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5594 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5595 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5596 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5597 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5598 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5599 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5600 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5601 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5602 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5603 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5604 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5605 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5606 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5607 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5608 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5609 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5610 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5611 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5612 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5613 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5614 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5615 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5616 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5617 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5618 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5619 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5620 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5621 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5622 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5623 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5624 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5625 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5626 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5627 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5628 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5629 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5630 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5631 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5632 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5633 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5634 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5635 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5636 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5637 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5638 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5639 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5640 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5641 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5642 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5643 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5644 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5645 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5646 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5647 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5648 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5649 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5650 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5651 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5652 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5653 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5654 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5655 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5656 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5657 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5658 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5659 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5660 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5661 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5662 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5663 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5664 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5665 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5666 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5667 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5668 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5669 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5670 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5671 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5672 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5673 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5674 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5675 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5676 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5677 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5678 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5679 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5680 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5681 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5682 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5683 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5684 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5685 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5686 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5687 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5688 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5689 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5690 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5691 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5692 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5693 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5694 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5695 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5696 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5697 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5698 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5699 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5700 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5701 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5702 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5703 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5704 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5705 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5706 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5707 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5708 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5709 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5710 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5711 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5712 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5713 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5714 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5715 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5716 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5717 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5718 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5719 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5720 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5721 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5722 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5723 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5724 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5725 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5726 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5727 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5728 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5729 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5730 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5731 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5732 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5733 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5734 0x00000000, 0x00000000, 0x00000000,
/*
 * Read-only data segment of the TSO firmware image for 5705-class
 * chips, loaded into NIC SRAM at TG3_TSO5_FW_RODATA_ADDR (see
 * tg3_load_tso_firmware).  The 32-bit words encode ASCII tags used by
 * the firmware, e.g. 0x4d61696e = "Main", 0x43707542 = "CpuB",
 * 0x73746b6f 0x66666c64 = "stkoffld", 0x66617461 0x6c457272 = "fatalErr".
 */
5737 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5738 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5739 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5740 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5741 0x00000000, 0x00000000, 0x00000000,
/*
 * Initialized data segment of the 5705 TSO firmware, loaded into NIC
 * SRAM at TG3_TSO5_FW_DATA_ADDR (see tg3_load_tso_firmware).  The hex
 * words spell the firmware version string "stkoffld_v1.2.0"
 * (0x73746b6f "stko", 0x66666c64 "ffld", 0x5f76312e "_v1.",
 * 0x322e3000 "2.0\0").
 */
5744 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5745 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5746 0x00000000, 0x00000000, 0x00000000,
5749 /* tp->lock is held. */
/*
 * Download the TSO offload firmware into the NIC's on-chip CPU and
 * start it executing.  On 5705-class ASICs the firmware runs on the RX
 * CPU with scratch space carved out of the mbuf pool; on all other
 * parts it runs on the TX CPU with that CPU's dedicated scratch area.
 * NOTE(review): sampled listing — some statements (the early return
 * for hardware-TSO chips, error-check returns, the success return and
 * closing braces) are not visible here.
 */
5750 static int tg3_load_tso_firmware(struct tg3 *tp)
5752 struct fw_info info;
5753 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
/* Chips that do TSO in hardware need no firmware download. */
5756 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5759 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
/* 5705: TSO5 image on the RX CPU, scratch in the mbuf pool. */
5760 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5761 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5762 info.text_data = &tg3Tso5FwText[0];
5763 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5764 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5765 info.rodata_data = &tg3Tso5FwRodata[0];
5766 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5767 info.data_len = TG3_TSO5_FW_DATA_LEN;
5768 info.data_data = &tg3Tso5FwData[0];
5769 cpu_base = RX_CPU_BASE;
5770 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
/* Scratch must cover all firmware segments (middle terms elided
 * from this listing). */
5771 cpu_scratch_size = (info.text_len +
5774 TG3_TSO5_FW_SBSS_LEN +
5775 TG3_TSO5_FW_BSS_LEN);
/* All other chips: standard TSO image on the TX CPU. */
5777 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5778 info.text_len = TG3_TSO_FW_TEXT_LEN;
5779 info.text_data = &tg3TsoFwText[0];
5780 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5781 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5782 info.rodata_data = &tg3TsoFwRodata[0];
5783 info.data_base = TG3_TSO_FW_DATA_ADDR;
5784 info.data_len = TG3_TSO_FW_DATA_LEN;
5785 info.data_data = &tg3TsoFwData[0];
5786 cpu_base = TX_CPU_BASE;
5787 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5788 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5791 err = tg3_load_firmware_cpu(tp, cpu_base,
5792 cpu_scratch_base, cpu_scratch_size,
5797 /* Now startup the cpu. */
5798 tw32(cpu_base + CPU_STATE, 0xffffffff);
5799 tw32_f(cpu_base + CPU_PC, info.text_base);
/* Verify the CPU's program counter landed on the firmware entry
 * point; retry up to 5 times, halting and re-writing PC each time. */
5801 for (i = 0; i < 5; i++) {
5802 if (tr32(cpu_base + CPU_PC) == info.text_base)
5804 tw32(cpu_base + CPU_STATE, 0xffffffff);
5805 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5806 tw32_f(cpu_base + CPU_PC, info.text_base);
/* PC never stuck: report the mismatch (error-return line not
 * visible in this listing). */
5810 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5811 "to set CPU PC, is %08x should be %08x\n",
5812 tp->dev->name, tr32(cpu_base + CPU_PC),
/* Release the CPU from halt so the firmware starts running. */
5816 tw32(cpu_base + CPU_STATE, 0xffffffff);
5817 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5821 #endif /* TG3_TSO_SUPPORT != 0 */
5823 /* tp->lock is held. */
/*
 * Program the netdev's station address into the MAC hardware: the same
 * address is written to all four MAC_ADDR_{0..3} high/low register
 * pairs, and on 5703/5704 also to the twelve extended-address slots.
 * Finally the transmit backoff seed is derived from the byte-sum of
 * the address, masked with TX_BACKOFF_SEED_MASK.
 */
5824 static void __tg3_set_mac_addr(struct tg3 *tp)
5826 u32 addr_high, addr_low;
/* Split the 6-byte address: top 2 bytes -> addr_high,
 * bottom 4 bytes -> addr_low. */
5829 addr_high = ((tp->dev->dev_addr[0] << 8) |
5830 tp->dev->dev_addr[1]);
5831 addr_low = ((tp->dev->dev_addr[2] << 24) |
5832 (tp->dev->dev_addr[3] << 16) |
5833 (tp->dev->dev_addr[4] << 8) |
5834 (tp->dev->dev_addr[5] << 0));
/* Register pairs are 8 bytes apart; fill all four slots. */
5835 for (i = 0; i < 4; i++) {
5836 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5837 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5840 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5841 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5842 for (i = 0; i < 12; i++) {
5843 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5844 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Reuse addr_high as the backoff seed: sum of address bytes. */
5848 addr_high = (tp->dev->dev_addr[0] +
5849 tp->dev->dev_addr[1] +
5850 tp->dev->dev_addr[2] +
5851 tp->dev->dev_addr[3] +
5852 tp->dev->dev_addr[4] +
5853 tp->dev->dev_addr[5]) &
5854 TX_BACKOFF_SEED_MASK;
5855 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/*
 * ndo_set_mac_address handler.  Validates the proposed address and
 * copies it into the netdev.  If the interface is down, nothing more
 * is done (return path not visible in this sampled listing).  If ASF
 * management firmware is enabled, the whole chip is halted, reset and
 * restarted under the full lock so ASF can re-learn its MAC addresses;
 * otherwise the address registers are simply rewritten under tp->lock.
 */
5858 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5860 struct tg3 *tp = netdev_priv(dev);
5861 struct sockaddr *addr = p;
/* Reject multicast/zero addresses (error return line elided). */
5864 if (!is_valid_ether_addr(addr->sa_data))
5867 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5869 if (!netif_running(dev))
5872 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5873 /* Reset chip so that ASF can re-init any MAC addresses it
5877 tg3_full_lock(tp, 1);
5879 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5880 err = tg3_restart_hw(tp, 0);
5882 tg3_netif_start(tp);
5883 tg3_full_unlock(tp);
/* Non-ASF path: just reprogram the MAC address registers. */
5885 spin_lock_bh(&tp->lock);
5886 __tg3_set_mac_addr(tp);
5887 spin_unlock_bh(&tp->lock);
5893 /* tp->lock is held. */
/*
 * Write one TG3_BDINFO descriptor-ring control structure in NIC SRAM:
 * the ring's 64-bit host DMA address (high and low halves), the
 * maxlen/flags word, and — on pre-5705 chips only — the ring's NIC
 * SRAM address.
 * NOTE(review): sampled listing — the tg3_write_mem(tp, ...) call
 * lines that precede each offset expression, the final parameter of
 * the signature, and the value arguments for the last two writes are
 * not visible here.
 */
5894 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5895 dma_addr_t mapping, u32 maxlen_flags,
5899 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5900 ((u64) mapping >> 32));
5902 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5903 ((u64) mapping & 0xffffffff));
5905 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* The NIC-address field only exists on pre-5705 hardware. */
5908 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5910 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5914 static void __tg3_set_rx_mode(struct net_device *);
/*
 * Apply ethtool coalescing parameters to the host coalescing engine.
 * RX/TX tick counts and max-frame thresholds are written on all chips;
 * the IRQ-time tick variants and the statistics-block interval are
 * written only on pre-5705 hardware.
 */
5915 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5917 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5918 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5919 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5920 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
/* IRQ-time coalescing registers exist only on pre-5705 chips. */
5921 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5922 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5923 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5925 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5926 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
/* Statistics-block interval, pre-5705 only. */
5927 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5928 u32 val = ec->stats_block_coalesce_usecs;
/* Value is adjusted when the link carrier is down (the assignment
 * line is not visible in this sampled listing). */
5930 if (!netif_carrier_ok(tp->dev))
5933 tw32(HOSTCC_STAT_COAL_TICKS, val);
5937 /* tp->lock is held. */
5938 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5940 u32 val, rdmac_mode;
5943 tg3_disable_ints(tp);
5947 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5949 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5950 tg3_abort_hw(tp, 1);
5953 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5956 err = tg3_chip_reset(tp);
5960 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5962 /* This works around an issue with Athlon chipsets on
5963 * B3 tigon3 silicon. This bit has no effect on any
5964 * other revision. But do not set this on PCI Express
5967 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5968 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5969 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5971 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5972 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5973 val = tr32(TG3PCI_PCISTATE);
5974 val |= PCISTATE_RETRY_SAME_DMA;
5975 tw32(TG3PCI_PCISTATE, val);
5978 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5979 /* Enable some hw fixes. */
5980 val = tr32(TG3PCI_MSI_DATA);
5981 val |= (1 << 26) | (1 << 28) | (1 << 29);
5982 tw32(TG3PCI_MSI_DATA, val);
5985 /* Descriptor ring init may make accesses to the
5986 * NIC SRAM area to setup the TX descriptors, so we
5987 * can only do this after the hardware has been
5988 * successfully reset.
5990 err = tg3_init_rings(tp);
5994 /* This value is determined during the probe time DMA
5995 * engine test, tg3_test_dma.
5997 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5999 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6000 GRC_MODE_4X_NIC_SEND_RINGS |
6001 GRC_MODE_NO_TX_PHDR_CSUM |
6002 GRC_MODE_NO_RX_PHDR_CSUM);
6003 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6005 /* Pseudo-header checksum is done by hardware logic and not
6006 * the offload processers, so make the chip do the pseudo-
6007 * header checksums on receive. For transmit it is more
6008 * convenient to do the pseudo-header checksum in software
6009 * as Linux does that on transmit for us in all cases.
6011 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6015 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6017 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6018 val = tr32(GRC_MISC_CFG);
6020 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6021 tw32(GRC_MISC_CFG, val);
6023 /* Initialize MBUF/DESC pool. */
6024 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6026 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6027 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6029 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6031 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6032 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6033 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6035 #if TG3_TSO_SUPPORT != 0
6036 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6039 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6040 TG3_TSO5_FW_RODATA_LEN +
6041 TG3_TSO5_FW_DATA_LEN +
6042 TG3_TSO5_FW_SBSS_LEN +
6043 TG3_TSO5_FW_BSS_LEN);
6044 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6045 tw32(BUFMGR_MB_POOL_ADDR,
6046 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6047 tw32(BUFMGR_MB_POOL_SIZE,
6048 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6052 if (tp->dev->mtu <= ETH_DATA_LEN) {
6053 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6054 tp->bufmgr_config.mbuf_read_dma_low_water);
6055 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6056 tp->bufmgr_config.mbuf_mac_rx_low_water);
6057 tw32(BUFMGR_MB_HIGH_WATER,
6058 tp->bufmgr_config.mbuf_high_water);
6060 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6061 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6062 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6063 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6064 tw32(BUFMGR_MB_HIGH_WATER,
6065 tp->bufmgr_config.mbuf_high_water_jumbo);
6067 tw32(BUFMGR_DMA_LOW_WATER,
6068 tp->bufmgr_config.dma_low_water);
6069 tw32(BUFMGR_DMA_HIGH_WATER,
6070 tp->bufmgr_config.dma_high_water);
6072 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6073 for (i = 0; i < 2000; i++) {
6074 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6079 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6084 /* Setup replenish threshold. */
6085 val = tp->rx_pending / 8;
6088 else if (val > tp->rx_std_max_post)
6089 val = tp->rx_std_max_post;
6091 tw32(RCVBDI_STD_THRESH, val);
6093 /* Initialize TG3_BDINFO's at:
6094 * RCVDBDI_STD_BD: standard eth size rx ring
6095 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6096 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6099 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6100 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6101 * ring attribute flags
6102 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6104 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6105 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6107 * The size of each ring is fixed in the firmware, but the location is
6110 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6111 ((u64) tp->rx_std_mapping >> 32));
6112 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6113 ((u64) tp->rx_std_mapping & 0xffffffff));
6114 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6115 NIC_SRAM_RX_BUFFER_DESC);
6117 /* Don't even try to program the JUMBO/MINI buffer descriptor
6120 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6121 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6122 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6124 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6125 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6127 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6128 BDINFO_FLAGS_DISABLED);
6130 /* Setup replenish threshold. */
6131 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6133 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6134 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6135 ((u64) tp->rx_jumbo_mapping >> 32));
6136 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6137 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6138 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6139 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6140 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6141 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6143 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6144 BDINFO_FLAGS_DISABLED);
6149 /* There is only one send ring on 5705/5750, no need to explicitly
6150 * disable the others.
6152 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6153 /* Clear out send RCB ring in SRAM. */
6154 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6155 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6156 BDINFO_FLAGS_DISABLED);
6161 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6162 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6164 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6165 tp->tx_desc_mapping,
6166 (TG3_TX_RING_SIZE <<
6167 BDINFO_FLAGS_MAXLEN_SHIFT),
6168 NIC_SRAM_TX_BUFFER_DESC);
6170 /* There is only one receive return ring on 5705/5750, no need
6171 * to explicitly disable the others.
6173 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6174 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6175 i += TG3_BDINFO_SIZE) {
6176 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6177 BDINFO_FLAGS_DISABLED);
6182 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6184 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6186 (TG3_RX_RCB_RING_SIZE(tp) <<
6187 BDINFO_FLAGS_MAXLEN_SHIFT),
6190 tp->rx_std_ptr = tp->rx_pending;
6191 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6194 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6195 tp->rx_jumbo_pending : 0;
6196 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6199 /* Initialize MAC address and backoff seed. */
6200 __tg3_set_mac_addr(tp);
6202 /* MTU + ethernet header + FCS + optional VLAN tag */
6203 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6205 /* The slot time is changed by tg3_setup_phy if we
6206 * run at gigabit with half duplex.
6208 tw32(MAC_TX_LENGTHS,
6209 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6210 (6 << TX_LENGTHS_IPG_SHIFT) |
6211 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6213 /* Receive rules. */
6214 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6215 tw32(RCVLPC_CONFIG, 0x0181);
6217 /* Calculate RDMAC_MODE setting early, we need it to determine
6218 * the RCVLPC_STATE_ENABLE mask.
6220 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6221 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6222 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6223 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6224 RDMAC_MODE_LNGREAD_ENAB);
6225 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6226 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6228 /* If statement applies to 5705 and 5750 PCI devices only */
6229 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6230 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6231 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6232 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6233 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6234 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6235 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6236 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6237 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6238 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6242 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6243 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6245 #if TG3_TSO_SUPPORT != 0
6246 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6247 rdmac_mode |= (1 << 27);
6250 /* Receive/send statistics. */
6251 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6252 val = tr32(RCVLPC_STATS_ENABLE);
6253 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6254 tw32(RCVLPC_STATS_ENABLE, val);
6255 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6256 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6257 val = tr32(RCVLPC_STATS_ENABLE);
6258 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6259 tw32(RCVLPC_STATS_ENABLE, val);
6261 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6263 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6264 tw32(SNDDATAI_STATSENAB, 0xffffff);
6265 tw32(SNDDATAI_STATSCTRL,
6266 (SNDDATAI_SCTRL_ENABLE |
6267 SNDDATAI_SCTRL_FASTUPD));
6269 /* Setup host coalescing engine. */
6270 tw32(HOSTCC_MODE, 0);
6271 for (i = 0; i < 2000; i++) {
6272 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6277 __tg3_set_coalesce(tp, &tp->coal);
6279 /* set status block DMA address */
6280 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6281 ((u64) tp->status_mapping >> 32));
6282 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6283 ((u64) tp->status_mapping & 0xffffffff));
6285 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6286 /* Status/statistics block address. See tg3_timer,
6287 * the tg3_periodic_fetch_stats call there, and
6288 * tg3_get_stats to see how this works for 5705/5750 chips.
6290 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6291 ((u64) tp->stats_mapping >> 32));
6292 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6293 ((u64) tp->stats_mapping & 0xffffffff));
6294 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6295 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6298 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6300 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6301 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6302 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6303 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6305 /* Clear statistics/status block in chip, and status block in ram. */
6306 for (i = NIC_SRAM_STATS_BLK;
6307 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6309 tg3_write_mem(tp, i, 0);
6312 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6314 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6315 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6316 /* reset to prevent losing 1st rx packet intermittently */
6317 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6321 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6322 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6323 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6326 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6327 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6328 * register to preserve the GPIO settings for LOMs. The GPIOs,
6329 * whether used as inputs or outputs, are set by boot code after
6332 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6335 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6336 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6339 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6340 GRC_LCLCTRL_GPIO_OUTPUT3;
6342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6343 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6345 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6347 /* GPIO1 must be driven high for eeprom write protect */
6348 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6349 GRC_LCLCTRL_GPIO_OUTPUT1);
6351 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6354 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6357 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6358 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6362 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6363 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6364 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6365 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6366 WDMAC_MODE_LNGREAD_ENAB);
6368 /* If statement applies to 5705 and 5750 PCI devices only */
6369 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6370 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6371 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6372 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6373 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6374 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6376 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6377 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6378 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6379 val |= WDMAC_MODE_RX_ACCEL;
6383 /* Enable host coalescing bug fix */
6384 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6385 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6388 tw32_f(WDMAC_MODE, val);
6391 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6392 val = tr32(TG3PCI_X_CAPS);
6393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6394 val &= ~PCIX_CAPS_BURST_MASK;
6395 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6396 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6397 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6398 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6399 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6400 val |= (tp->split_mode_max_reqs <<
6401 PCIX_CAPS_SPLIT_SHIFT);
6403 tw32(TG3PCI_X_CAPS, val);
6406 tw32_f(RDMAC_MODE, rdmac_mode);
6409 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6410 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6411 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6412 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6413 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6414 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6415 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6416 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6417 #if TG3_TSO_SUPPORT != 0
6418 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6419 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6421 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6422 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6424 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6425 err = tg3_load_5701_a0_firmware_fix(tp);
6430 #if TG3_TSO_SUPPORT != 0
6431 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6432 err = tg3_load_tso_firmware(tp);
6438 tp->tx_mode = TX_MODE_ENABLE;
6439 tw32_f(MAC_TX_MODE, tp->tx_mode);
6442 tp->rx_mode = RX_MODE_ENABLE;
6443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6444 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6446 tw32_f(MAC_RX_MODE, tp->rx_mode);
6449 if (tp->link_config.phy_is_low_power) {
6450 tp->link_config.phy_is_low_power = 0;
6451 tp->link_config.speed = tp->link_config.orig_speed;
6452 tp->link_config.duplex = tp->link_config.orig_duplex;
6453 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6456 tp->mi_mode = MAC_MI_MODE_BASE;
6457 tw32_f(MAC_MI_MODE, tp->mi_mode);
6460 tw32(MAC_LED_CTRL, tp->led_ctrl);
6462 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6463 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6464 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6467 tw32_f(MAC_RX_MODE, tp->rx_mode);
6470 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6471 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6472 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6473 /* Set drive transmission level to 1.2V */
6474 /* only if the signal pre-emphasis bit is not set */
6475 val = tr32(MAC_SERDES_CFG);
6478 tw32(MAC_SERDES_CFG, val);
6480 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6481 tw32(MAC_SERDES_CFG, 0x616000);
6484 /* Prevent chip from dropping frames when flow control
6487 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6490 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6491 /* Use hardware link auto-negotiation */
6492 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6495 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6496 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6499 tmp = tr32(SERDES_RX_CTRL);
6500 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6501 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6502 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6503 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6506 err = tg3_setup_phy(tp, reset_phy);
6510 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6513 /* Clear CRC stats. */
6514 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6515 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6516 tg3_readphy(tp, 0x14, &tmp);
6520 __tg3_set_rx_mode(tp->dev);
6522 /* Initialize receive rules. */
6523 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6524 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6525 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6526 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6528 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6529 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6533 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6537 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6539 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6541 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6543 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6545 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6547 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6549 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6551 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6553 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6555 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6557 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6559 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6561 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6563 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6571 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6576 /* Called at device open time to get the chip ready for
6577 * packet processing. Invoked with tp->lock held.
6579 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6583 /* Force the chip into D0. */
6584 err = tg3_set_power_state(tp, PCI_D0);
/* NOTE(review): the error-check on tg3_set_power_state() is elided in this
 * excerpt — confirm against the full source before editing. */
6588 tg3_switch_clocks(tp);
/* Reset the on-chip memory window base before the full hardware reset. */
6590 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* The bulk of the initialization is delegated to tg3_reset_hw();
 * presumably its result is returned to the caller (return not visible here). */
6592 err = tg3_reset_hw(tp, reset_phy);
/* TG3_STAT_ADD32(): fold the 32-bit hardware counter at register REG into
 * the 64-bit software counter PSTAT (a low/high pair).  The unsigned
 * comparison (low < __val after the add) detects 32-bit wraparound and
 * carries into the high word.  (do/while(0) tail elided in this excerpt.)
 */
6598 #define TG3_STAT_ADD32(PSTAT, REG) \
6599 do { u32 __val = tr32(REG); \
6600 (PSTAT)->low += __val; \
6601 if ((PSTAT)->low < __val) \
6602 (PSTAT)->high += 1; \
/* Accumulate the chip's periodic MAC statistics counters into the
 * software copy in tp->hw_stats via TG3_STAT_ADD32().  Skipped while
 * the link carrier is down.
 */
6605 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6607 struct tg3_hw_stats *sp = tp->hw_stats;
6609 if (!netif_carrier_ok(tp->dev))
/* TX-side MAC counters. */
6612 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6613 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6614 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6615 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6616 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6617 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6618 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6619 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6620 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6621 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6622 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6623 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6624 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* RX-side MAC counters. */
6626 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6627 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6628 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6629 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6630 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6631 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6632 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6633 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6634 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6635 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6636 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6637 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6638 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6639 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/* Receive-list-placement counters. */
6641 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6642 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6643 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Periodic driver timer.  Under tp->lock it: works around the race-prone
 * non-tagged IRQ status protocol, watches for a hung write-DMA engine
 * (scheduling reset_task), polls link state roughly once per second, and
 * sends the ASF firmware heartbeat.  Re-arms itself at the end.
 */
6646 static void tg3_timer(unsigned long __opaque)
6648 struct tg3 *tp = (struct tg3 *) __opaque;
6653 spin_lock(&tp->lock);
6655 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6656 /* All of this garbage is because when using non-tagged
6657 * IRQ status the mailbox/status_block protocol the chip
6658 * uses with the cpu is race prone.
6660 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6661 tw32(GRC_LOCAL_CTRL,
6662 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6664 tw32(HOSTCC_MODE, tp->coalesce_mode |
6665 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Write DMA engine no longer enabled: treat as a hang and hand off to
 * the reset task (lock dropped before scheduling). */
6668 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6669 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6670 spin_unlock(&tp->lock);
6671 schedule_work(&tp->reset_task);
6676 /* This part only runs once per second. */
6677 if (!--tp->timer_counter) {
6678 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6679 tg3_periodic_fetch_stats(tp);
6681 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6685 mac_stat = tr32(MAC_STATUS);
6688 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6689 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6691 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6695 tg3_setup_phy(tp, 0);
6696 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6697 u32 mac_stat = tr32(MAC_STATUS);
6700 if (netif_carrier_ok(tp->dev) &&
6701 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6704 if (! netif_carrier_ok(tp->dev) &&
6705 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6706 MAC_STATUS_SIGNAL_DET))) {
6712 ~MAC_MODE_PORT_MODE_MASK));
6714 tw32_f(MAC_MODE, tp->mac_mode);
6716 tg3_setup_phy(tp, 0);
6718 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6719 tg3_serdes_parallel_detect(tp);
6721 tp->timer_counter = tp->timer_multiplier;
6724 /* Heartbeat is only sent once every 2 seconds. */
6725 if (!--tp->asf_counter) {
6726 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6729 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6730 FWCMD_NICDRV_ALIVE2);
6731 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6732 /* 5 seconds timeout */
6733 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6734 val = tr32(GRC_RX_CPU_EVENT);
6736 tw32(GRC_RX_CPU_EVENT, val);
6738 tp->asf_counter = tp->asf_multiplier;
6741 spin_unlock(&tp->lock);
/* Re-arm ourselves for the next tick. */
6744 tp->timer.expires = jiffies + tp->timer_offset;
6745 add_timer(&tp->timer);
6748 static int tg3_request_irq(struct tg3 *tp)
6750 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6751 unsigned long flags;
6752 struct net_device *dev = tp->dev;
6754 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6756 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6758 flags = IRQF_SAMPLE_RANDOM;
6761 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6762 fn = tg3_interrupt_tagged;
6763 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6765 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6768 static int tg3_test_interrupt(struct tg3 *tp)
6770 struct net_device *dev = tp->dev;
6774 if (!netif_running(dev))
6777 tg3_disable_ints(tp);
6779 free_irq(tp->pdev->irq, dev);
6781 err = request_irq(tp->pdev->irq, tg3_test_isr,
6782 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6786 tp->hw_status->status &= ~SD_STATUS_UPDATED;
6787 tg3_enable_ints(tp);
6789 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6792 for (i = 0; i < 5; i++) {
6793 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6800 tg3_disable_ints(tp);
6802 free_irq(tp->pdev->irq, dev);
6804 err = tg3_request_irq(tp);
6815 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6816 * successfully restored
6818 static int tg3_test_msi(struct tg3 *tp)
6820 struct net_device *dev = tp->dev;
6824 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6827 /* Turn off SERR reporting in case MSI terminates with Master
6830 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6831 pci_write_config_word(tp->pdev, PCI_COMMAND,
6832 pci_cmd & ~PCI_COMMAND_SERR);
6834 err = tg3_test_interrupt(tp);
/* Restore the saved PCI command word regardless of the test result. */
6836 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6841 /* other failures */
6845 /* MSI test failed, go back to INTx mode */
6846 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6847 "switching to INTx mode. Please report this failure to "
6848 "the PCI maintainer and include system chipset information.\n",
6851 free_irq(tp->pdev->irq, dev);
6852 pci_disable_msi(tp->pdev);
6854 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6856 err = tg3_request_irq(tp);
6860 /* Need to reset the chip because the MSI cycle may have terminated
6861 * with Master Abort.
6863 tg3_full_lock(tp, 1);
6865 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6866 err = tg3_init_hw(tp, 1);
6868 tg3_full_unlock(tp);
/* On reinit failure the IRQ acquired above is released again. */
6871 free_irq(tp->pdev->irq, dev);
6876 static int tg3_open(struct net_device *dev)
6878 struct tg3 *tp = netdev_priv(dev);
6881 tg3_full_lock(tp, 0);
6883 err = tg3_set_power_state(tp, PCI_D0);
6887 tg3_disable_ints(tp);
6888 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6890 tg3_full_unlock(tp);
6892 /* The placement of this call is tied
6893 * to the setup and use of Host TX descriptors.
6895 err = tg3_alloc_consistent(tp);
6899 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6900 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6901 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6902 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6903 (tp->pdev_peer == tp->pdev))) {
6904 /* All MSI supporting chips should support tagged
6905 * status. Assert that this is the case.
6907 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6908 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6909 "Not using MSI.\n", tp->dev->name);
6910 } else if (pci_enable_msi(tp->pdev) == 0) {
6913 msi_mode = tr32(MSGINT_MODE);
6914 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6915 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6918 err = tg3_request_irq(tp);
6921 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6922 pci_disable_msi(tp->pdev);
6923 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6925 tg3_free_consistent(tp);
6929 tg3_full_lock(tp, 0);
6931 err = tg3_init_hw(tp, 1);
6933 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6936 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6937 tp->timer_offset = HZ;
6939 tp->timer_offset = HZ / 10;
6941 BUG_ON(tp->timer_offset > HZ);
6942 tp->timer_counter = tp->timer_multiplier =
6943 (HZ / tp->timer_offset);
6944 tp->asf_counter = tp->asf_multiplier =
6945 ((HZ / tp->timer_offset) * 2);
6947 init_timer(&tp->timer);
6948 tp->timer.expires = jiffies + tp->timer_offset;
6949 tp->timer.data = (unsigned long) tp;
6950 tp->timer.function = tg3_timer;
6953 tg3_full_unlock(tp);
6956 free_irq(tp->pdev->irq, dev);
6957 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6958 pci_disable_msi(tp->pdev);
6959 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6961 tg3_free_consistent(tp);
6965 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6966 err = tg3_test_msi(tp);
6969 tg3_full_lock(tp, 0);
6971 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6972 pci_disable_msi(tp->pdev);
6973 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6975 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6977 tg3_free_consistent(tp);
6979 tg3_full_unlock(tp);
6984 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6985 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6986 u32 val = tr32(0x7c04);
6988 tw32(0x7c04, val | (1 << 29));
6993 tg3_full_lock(tp, 0);
6995 add_timer(&tp->timer);
6996 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6997 tg3_enable_ints(tp);
6999 tg3_full_unlock(tp);
7001 netif_start_queue(dev);
/* Debug-only helper: dump a snapshot of PCI config state, MAC/DMA/host-
 * coalescing registers, NIC-side SRAM control blocks, the host status and
 * statistics blocks, and the first few NIC-side TX/RX descriptors to the
 * kernel log.  Purely printk-based; it reads hardware but changes nothing.
 */
7007 /*static*/ void tg3_dump_state(struct tg3 *tp)
7009 u32 val32, val32_2, val32_3, val32_4, val32_5;
7013 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7014 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7015 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7019 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7020 tr32(MAC_MODE), tr32(MAC_STATUS));
7021 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7022 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7023 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7024 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7025 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7026 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7028 /* Send data initiator control block */
7029 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7030 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7031 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7032 tr32(SNDDATAI_STATSCTRL));
7034 /* Send data completion control block */
7035 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7037 /* Send BD ring selector block */
7038 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7039 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7041 /* Send BD initiator control block */
7042 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7043 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7045 /* Send BD completion control block */
7046 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7048 /* Receive list placement control block */
7049 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7050 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7051 printk(" RCVLPC_STATSCTRL[%08x]\n",
7052 tr32(RCVLPC_STATSCTRL));
7054 /* Receive data and receive BD initiator control block */
7055 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7056 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7058 /* Receive data completion control block */
7059 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7062 /* Receive BD initiator control block */
7063 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7064 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7066 /* Receive BD completion control block */
7067 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7068 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7070 /* Receive list selector control block */
7071 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7072 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7074 /* Mbuf cluster free block */
7075 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7076 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7078 /* Host coalescing control block */
7079 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7080 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7081 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7082 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7083 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW),
7084 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7085 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7086 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7087 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7088 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7089 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7090 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7092 /* Memory arbiter control block */
7093 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7094 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7096 /* Buffer manager control block */
7097 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7098 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7099 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7100 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7101 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7102 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7103 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7104 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7106 /* Read DMA control block */
7107 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7108 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7110 /* Write DMA control block */
7111 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7112 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7114 /* DMA completion block */
7115 printk("DEBUG: DMAC_MODE[%08x]\n",
7119 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7120 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7121 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7122 tr32(GRC_LOCAL_CTRL));
7125 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7126 tr32(RCVDBDI_JUMBO_BD + 0x0),
7127 tr32(RCVDBDI_JUMBO_BD + 0x4),
7128 tr32(RCVDBDI_JUMBO_BD + 0x8),
7129 tr32(RCVDBDI_JUMBO_BD + 0xc));
7130 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7131 tr32(RCVDBDI_STD_BD + 0x0),
7132 tr32(RCVDBDI_STD_BD + 0x4),
7133 tr32(RCVDBDI_STD_BD + 0x8),
7134 tr32(RCVDBDI_STD_BD + 0xc));
7135 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7136 tr32(RCVDBDI_MINI_BD + 0x0),
7137 tr32(RCVDBDI_MINI_BD + 0x4),
7138 tr32(RCVDBDI_MINI_BD + 0x8),
7139 tr32(RCVDBDI_MINI_BD + 0xc));
7141 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7142 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7143 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7144 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7145 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7146 val32, val32_2, val32_3, val32_4);
7148 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7149 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7150 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7151 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7152 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7153 val32, val32_2, val32_3, val32_4);
7155 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7156 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7157 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7158 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7159 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7160 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7161 val32, val32_2, val32_3, val32_4, val32_5);
7163 /* SW status block */
7164 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7165 tp->hw_status->status,
7166 tp->hw_status->status_tag,
7167 tp->hw_status->rx_jumbo_consumer,
7168 tp->hw_status->rx_consumer,
7169 tp->hw_status->rx_mini_consumer,
7170 tp->hw_status->idx[0].rx_producer,
7171 tp->hw_status->idx[0].tx_consumer);
7173 /* SW statistics block */
7174 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7175 ((u32 *)tp->hw_stats)[0],
7176 ((u32 *)tp->hw_stats)[1],
7177 ((u32 *)tp->hw_stats)[2],
7178 ((u32 *)tp->hw_stats)[3]);
7181 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7182 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7183 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7184 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7185 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7187 /* NIC side send descriptors. */
7188 for (i = 0; i < 6; i++) {
7191 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7192 + (i * sizeof(struct tg3_tx_buffer_desc));
7193 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7195 readl(txd + 0x0), readl(txd + 0x4),
7196 readl(txd + 0x8), readl(txd + 0xc));
7199 /* NIC side RX descriptors. */
7200 for (i = 0; i < 6; i++) {
7203 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7204 + (i * sizeof(struct tg3_rx_buffer_desc));
7205 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7207 readl(rxd + 0x0), readl(rxd + 0x4),
7208 readl(rxd + 0x8), readl(rxd + 0xc));
7209 rxd += (4 * sizeof(u32));
7210 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7212 readl(rxd + 0x0), readl(rxd + 0x4),
7213 readl(rxd + 0x8), readl(rxd + 0xc));
7216 for (i = 0; i < 6; i++) {
7219 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7220 + (i * sizeof(struct tg3_rx_buffer_desc));
7221 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7223 readl(rxd + 0x0), readl(rxd + 0x4),
7224 readl(rxd + 0x8), readl(rxd + 0xc));
7225 rxd += (4 * sizeof(u32));
7226 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7228 readl(rxd + 0x0), readl(rxd + 0x4),
7229 readl(rxd + 0x8), readl(rxd + 0xc));
/* Forward declarations; the definitions appear later in the file. */
7234 static struct net_device_stats *tg3_get_stats(struct net_device *);
7235 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device stop callback: wait out any in-flight reset task, stop the
 * queue and timer, halt the chip, release the IRQ (and MSI), snapshot the
 * final statistics, free the DMA rings, and drop the device into D3hot.
 */
7237 static int tg3_close(struct net_device *dev)
7239 struct tg3 *tp = netdev_priv(dev);
7241 /* Calling flush_scheduled_work() may deadlock because
7242 * linkwatch_event() may be on the workqueue and it will try to get
7243 * the rtnl_lock which we are holding.
7245 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7248 netif_stop_queue(dev);
7250 del_timer_sync(&tp->timer);
7252 tg3_full_lock(tp, 1);
7257 tg3_disable_ints(tp);
7259 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7262 ~(TG3_FLAG_INIT_COMPLETE |
7263 TG3_FLAG_GOT_SERDES_FLOWCTL);
7265 tg3_full_unlock(tp);
7267 free_irq(tp->pdev->irq, dev);
7268 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7269 pci_disable_msi(tp->pdev);
7270 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Preserve final counters so statistics survive across close/open. */
7273 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7274 sizeof(tp->net_stats_prev));
7275 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7276 sizeof(tp->estats_prev));
7278 tg3_free_consistent(tp);
7280 tg3_set_power_state(tp, PCI_D3hot);
7282 netif_carrier_off(tp->dev);
/* Collapse a 64-bit hardware stat (high/low pair) into an unsigned long.
 * NOTE(review): only the 32-bit-long combine path is visible in this
 * excerpt; the 64-bit path is elided.
 */
7287 static inline unsigned long get_stat64(tg3_stat64_t *val)
7291 #if (BITS_PER_LONG == 32)
7294 ret = ((u64)val->high << 32) | ((u64)val->low);
/* Return the accumulated RX CRC error count.  On 5700/5701 copper PHYs
 * the count comes from PHY shadow registers 0x1e/0x14 (read under
 * tp->lock, accumulated into tp->phy_crc_errors); otherwise it falls
 * through to the MAC's rx_fcs_errors hardware statistic.
 */
7299 static unsigned long calc_crc_errors(struct tg3 *tp)
7301 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7303 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7304 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7305 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7308 spin_lock_bh(&tp->lock);
7309 if (!tg3_readphy(tp, 0x1e, &val)) {
/* Setting bit 15 of reg 0x1e presumably enables shadow access —
 * TODO confirm against the PHY datasheet. */
7310 tg3_writephy(tp, 0x1e, val | 0x8000);
7311 tg3_readphy(tp, 0x14, &val);
7314 spin_unlock_bh(&tp->lock);
7316 tp->phy_crc_errors += val;
7318 return tp->phy_crc_errors;
7321 return get_stat64(&hw_stats->rx_fcs_errors);
/* ESTAT_ADD(member): ethtool stat = snapshot saved at last close
 * (old_estats) + current hardware counter (hw_stats), so counters are
 * monotonic across close/open cycles.  Expects estats/old_estats/hw_stats
 * in scope at the expansion site.
 */
7324 #define ESTAT_ADD(member) \
7325 estats->member = old_estats->member + \
7326 get_stat64(&hw_stats->member)
/* Build the full ethtool statistics block by summing the pre-close
 * snapshot with the live hardware counters, one ESTAT_ADD per member.
 * Returns a pointer to tp->estats (return statement elided in excerpt).
 */
7328 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7330 struct tg3_ethtool_stats *estats = &tp->estats;
7331 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7332 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* RX counters. */
7337 ESTAT_ADD(rx_octets);
7338 ESTAT_ADD(rx_fragments);
7339 ESTAT_ADD(rx_ucast_packets);
7340 ESTAT_ADD(rx_mcast_packets);
7341 ESTAT_ADD(rx_bcast_packets);
7342 ESTAT_ADD(rx_fcs_errors);
7343 ESTAT_ADD(rx_align_errors);
7344 ESTAT_ADD(rx_xon_pause_rcvd);
7345 ESTAT_ADD(rx_xoff_pause_rcvd);
7346 ESTAT_ADD(rx_mac_ctrl_rcvd);
7347 ESTAT_ADD(rx_xoff_entered);
7348 ESTAT_ADD(rx_frame_too_long_errors);
7349 ESTAT_ADD(rx_jabbers);
7350 ESTAT_ADD(rx_undersize_packets);
7351 ESTAT_ADD(rx_in_length_errors);
7352 ESTAT_ADD(rx_out_length_errors);
7353 ESTAT_ADD(rx_64_or_less_octet_packets);
7354 ESTAT_ADD(rx_65_to_127_octet_packets);
7355 ESTAT_ADD(rx_128_to_255_octet_packets);
7356 ESTAT_ADD(rx_256_to_511_octet_packets);
7357 ESTAT_ADD(rx_512_to_1023_octet_packets);
7358 ESTAT_ADD(rx_1024_to_1522_octet_packets);
7359 ESTAT_ADD(rx_1523_to_2047_octet_packets);
7360 ESTAT_ADD(rx_2048_to_4095_octet_packets);
7361 ESTAT_ADD(rx_4096_to_8191_octet_packets);
7362 ESTAT_ADD(rx_8192_to_9022_octet_packets);
/* TX counters. */
7364 ESTAT_ADD(tx_octets);
7365 ESTAT_ADD(tx_collisions);
7366 ESTAT_ADD(tx_xon_sent);
7367 ESTAT_ADD(tx_xoff_sent);
7368 ESTAT_ADD(tx_flow_control);
7369 ESTAT_ADD(tx_mac_errors);
7370 ESTAT_ADD(tx_single_collisions);
7371 ESTAT_ADD(tx_mult_collisions);
7372 ESTAT_ADD(tx_deferred);
7373 ESTAT_ADD(tx_excessive_collisions);
7374 ESTAT_ADD(tx_late_collisions);
7375 ESTAT_ADD(tx_collide_2times);
7376 ESTAT_ADD(tx_collide_3times);
7377 ESTAT_ADD(tx_collide_4times);
7378 ESTAT_ADD(tx_collide_5times);
7379 ESTAT_ADD(tx_collide_6times);
7380 ESTAT_ADD(tx_collide_7times);
7381 ESTAT_ADD(tx_collide_8times);
7382 ESTAT_ADD(tx_collide_9times);
7383 ESTAT_ADD(tx_collide_10times);
7384 ESTAT_ADD(tx_collide_11times);
7385 ESTAT_ADD(tx_collide_12times);
7386 ESTAT_ADD(tx_collide_13times);
7387 ESTAT_ADD(tx_collide_14times);
7388 ESTAT_ADD(tx_collide_15times);
7389 ESTAT_ADD(tx_ucast_packets);
7390 ESTAT_ADD(tx_mcast_packets);
7391 ESTAT_ADD(tx_bcast_packets);
7392 ESTAT_ADD(tx_carrier_sense_errors);
7393 ESTAT_ADD(tx_discards);
7394 ESTAT_ADD(tx_errors);
/* DMA / ring-level counters. */
7396 ESTAT_ADD(dma_writeq_full);
7397 ESTAT_ADD(dma_write_prioq_full);
7398 ESTAT_ADD(rxbds_empty);
7399 ESTAT_ADD(rx_discards);
7400 ESTAT_ADD(rx_errors);
7401 ESTAT_ADD(rx_threshold_hit);
7403 ESTAT_ADD(dma_readq_full);
7404 ESTAT_ADD(dma_read_prioq_full);
7405 ESTAT_ADD(tx_comp_queue_full);
7407 ESTAT_ADD(ring_set_send_prod_index);
7408 ESTAT_ADD(ring_status_update);
7409 ESTAT_ADD(nic_irqs);
7410 ESTAT_ADD(nic_avoided_irqs);
7411 ESTAT_ADD(nic_tx_threshold_hit);
7416 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7418 struct tg3 *tp = netdev_priv(dev);
7419 struct net_device_stats *stats = &tp->net_stats;
7420 struct net_device_stats *old_stats = &tp->net_stats_prev;
7421 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7426 stats->rx_packets = old_stats->rx_packets +
7427 get_stat64(&hw_stats->rx_ucast_packets) +
7428 get_stat64(&hw_stats->rx_mcast_packets) +
7429 get_stat64(&hw_stats->rx_bcast_packets);
7431 stats->tx_packets = old_stats->tx_packets +
7432 get_stat64(&hw_stats->tx_ucast_packets) +
7433 get_stat64(&hw_stats->tx_mcast_packets) +
7434 get_stat64(&hw_stats->tx_bcast_packets);
7436 stats->rx_bytes = old_stats->rx_bytes +
7437 get_stat64(&hw_stats->rx_octets);
7438 stats->tx_bytes = old_stats->tx_bytes +
7439 get_stat64(&hw_stats->tx_octets);
7441 stats->rx_errors = old_stats->rx_errors +
7442 get_stat64(&hw_stats->rx_errors);
7443 stats->tx_errors = old_stats->tx_errors +
7444 get_stat64(&hw_stats->tx_errors) +
7445 get_stat64(&hw_stats->tx_mac_errors) +
7446 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7447 get_stat64(&hw_stats->tx_discards);
7449 stats->multicast = old_stats->multicast +
7450 get_stat64(&hw_stats->rx_mcast_packets);
7451 stats->collisions = old_stats->collisions +
7452 get_stat64(&hw_stats->tx_collisions);
7454 stats->rx_length_errors = old_stats->rx_length_errors +
7455 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7456 get_stat64(&hw_stats->rx_undersize_packets);
7458 stats->rx_over_errors = old_stats->rx_over_errors +
7459 get_stat64(&hw_stats->rxbds_empty);
7460 stats->rx_frame_errors = old_stats->rx_frame_errors +
7461 get_stat64(&hw_stats->rx_align_errors);
7462 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7463 get_stat64(&hw_stats->tx_discards);
7464 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7465 get_stat64(&hw_stats->tx_carrier_sense_errors);
7467 stats->rx_crc_errors = old_stats->rx_crc_errors +
7468 calc_crc_errors(tp);
7470 stats->rx_missed_errors = old_stats->rx_missed_errors +
7471 get_stat64(&hw_stats->rx_discards);
/* Bitwise CRC over buf[0..len-1], used below to index the 128-bit
 * multicast hash filter.  NOTE(review): the polynomial and register
 * initialization are elided in this excerpt — confirm in full source.
 */
7476 static inline u32 calc_crc(unsigned char *buf, int len)
7484 for (j = 0; j < len; j++) {
7487 for (k = 0; k < 8; k++) {
/* Program all four 32-bit multicast hash registers to either accept every
 * multicast frame (all ones) or reject all of them (all zeros).
 */
7501 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7503 /* accept or reject all multicast frames */
7504 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7505 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7506 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7507 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Recompute the MAC RX-mode word (promiscuous / VLAN-tag-keep) and the
 * multicast hash filter from dev->flags and the device multicast list.
 * Caller holds the device lock (tg3_set_rx_mode wraps this with
 * tg3_full_lock).  Only writes MAC_RX_MODE when the value changed.
 */
7510 static void __tg3_set_rx_mode(struct net_device *dev)
7512 struct tg3 *tp = netdev_priv(dev);
7515 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7516 RX_MODE_KEEP_VLAN_TAG);
7518 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7521 #if TG3_VLAN_TAG_USED
7523 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7524 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7526 /* By definition, VLAN is disabled always in this
7529 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7530 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7533 if (dev->flags & IFF_PROMISC) {
7534 /* Promiscuous mode. */
7535 rx_mode |= RX_MODE_PROMISC;
7536 } else if (dev->flags & IFF_ALLMULTI) {
7537 /* Accept all multicast. */
7538 tg3_set_multi (tp, 1);
7539 } else if (dev->mc_count < 1) {
7540 /* Reject all multicast. */
7541 tg3_set_multi (tp, 0);
7543 /* Accept one or more multicast(s). */
7544 struct dev_mc_list *mclist;
7546 u32 mc_filter[4] = { 0, };
/* Hash each multicast MAC into one bit of the 128-bit filter:
 * bits 5-6 of the CRC select the register, low 5 bits the bit. */
7551 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7552 i++, mclist = mclist->next) {
7554 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7556 regidx = (bit & 0x60) >> 5;
7558 mc_filter[regidx] |= (1 << bit);
7561 tw32(MAC_HASH_REG_0, mc_filter[0]);
7562 tw32(MAC_HASH_REG_1, mc_filter[1]);
7563 tw32(MAC_HASH_REG_2, mc_filter[2]);
7564 tw32(MAC_HASH_REG_3, mc_filter[3]);
7567 if (rx_mode != tp->rx_mode) {
7568 tp->rx_mode = rx_mode;
7569 tw32_f(MAC_RX_MODE, rx_mode);
/* net_device set_rx_mode callback: take the full lock and delegate to
 * __tg3_set_rx_mode.  No-op if the interface is not running.
 */
7574 static void tg3_set_rx_mode(struct net_device *dev)
7576 struct tg3 *tp = netdev_priv(dev);
7578 if (!netif_running(dev))
7581 tg3_full_lock(tp, 0);
7582 __tg3_set_rx_mode(dev);
7583 tg3_full_unlock(tp);
/* Size of the ethtool register-dump buffer (32 KB). */
7586 #define TG3_REGDUMP_LEN (32 * 1024)
/* ethtool get_regs_len callback: fixed-size dump. */
7588 static int tg3_get_regs_len(struct net_device *dev)
7590 return TG3_REGDUMP_LEN;
/* ethtool get_regs callback: zero the 32 KB buffer, then copy selected
 * register ranges into it at their native offsets via the GET_REG32_*
 * helpers so the dump is sparse but offset-accurate.  Bails out early if
 * the PHY is in low-power state.
 */
7593 static void tg3_get_regs(struct net_device *dev,
7594 struct ethtool_regs *regs, void *_p)
7597 struct tg3 *tp = netdev_priv(dev);
7603 memset(p, 0, TG3_REGDUMP_LEN);
7605 if (tp->link_config.phy_is_low_power)
7608 tg3_full_lock(tp, 0);
7610 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
7611 #define GET_REG32_LOOP(base,len) \
7612 do { p = (u32 *)(orig_p + (base)); \
7613 for (i = 0; i < len; i += 4) \
7614 __GET_REG32((base) + i); \
7616 #define GET_REG32_1(reg) \
7617 do { p = (u32 *)(orig_p + (reg)); \
7618 __GET_REG32((reg)); \
7621 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7622 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7623 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7624 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7625 GET_REG32_1(SNDDATAC_MODE);
7626 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7627 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7628 GET_REG32_1(SNDBDC_MODE);
7629 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7630 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7631 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7632 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7633 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7634 GET_REG32_1(RCVDCC_MODE);
7635 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7636 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7637 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7638 GET_REG32_1(MBFREE_MODE);
7639 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7640 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7641 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7642 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7643 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7644 GET_REG32_1(RX_CPU_MODE);
7645 GET_REG32_1(RX_CPU_STATE);
7646 GET_REG32_1(RX_CPU_PGMCTR);
7647 GET_REG32_1(RX_CPU_HWBKPT);
7648 GET_REG32_1(TX_CPU_MODE);
7649 GET_REG32_1(TX_CPU_STATE);
7650 GET_REG32_1(TX_CPU_PGMCTR);
7651 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7652 GET_REG32_LOOP(FTQ_RESET, 0x120);
7653 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7654 GET_REG32_1(DMAC_MODE);
7655 GET_REG32_LOOP(GRC_MODE, 0x4c);
/* NVRAM registers are dumped only when NVRAM is present. */
7656 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7657 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7660 #undef GET_REG32_LOOP
7663 tg3_full_unlock(tp);
/* ethtool get_eeprom_len callback: report the probed NVRAM size. */
7666 static int tg3_get_eeprom_len(struct net_device *dev)
7668 struct tg3 *tp = netdev_priv(dev);
7670 return tp->nvram_size;
/* Forward declarations for the NVRAM accessors used below. */
7673 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7674 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
/* ethtool get_eeprom callback: read an arbitrary byte range out of NVRAM.
 * NVRAM reads are 4-byte aligned, so the routine handles an unaligned
 * head, a whole-word middle, and an unaligned tail separately.  Refuses
 * to touch the device while the PHY is in low-power state.
 */
7676 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7678 struct tg3 *tp = netdev_priv(dev);
7681 u32 i, offset, len, val, b_offset, b_count;
7683 if (tp->link_config.phy_is_low_power)
7686 offset = eeprom->offset;
7690 eeprom->magic = TG3_EEPROM_MAGIC;
7693 /* adjustments to start on required 4 byte boundary */
7694 b_offset = offset & 3;
7695 b_count = 4 - b_offset;
7696 if (b_count > len) {
7697 /* i.e. offset=1 len=2 */
7700 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7703 val = cpu_to_le32(val);
7704 memcpy(data, ((char*)&val) + b_offset, b_count);
7707 eeprom->len += b_count;
7710 /* read bytes upto the last 4 byte boundary */
7711 pd = &data[eeprom->len];
7712 for (i = 0; i < (len - (len & 3)); i += 4) {
7713 ret = tg3_nvram_read(tp, offset + i, &val);
7718 val = cpu_to_le32(val);
7719 memcpy(pd + i, &val, 4);
7724 /* read last bytes not ending on 4 byte boundary */
7725 pd = &data[eeprom->len];
7727 b_offset = offset + len - b_count;
7728 ret = tg3_nvram_read(tp, b_offset, &val);
7731 val = cpu_to_le32(val);
7732 memcpy(pd, ((char*)&val), b_count);
7733 eeprom->len += b_count;
7738 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* ethtool .set_eeprom: write 'len' bytes at eeprom->offset.  Because NVRAM
 * writes are word-granular, unaligned head/tail words are read-modify-written
 * via a temporary kmalloc'd buffer; a fully aligned write goes straight from
 * 'data'.  Rejects requests unless the caller supplied the TG3 magic.
 */
7740 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7742 struct tg3 *tp = netdev_priv(dev);
7744 u32 offset, len, b_offset, odd_len, start, end;
7747 if (tp->link_config.phy_is_low_power)
7750 if (eeprom->magic != TG3_EEPROM_MAGIC)
7753 offset = eeprom->offset;
7756 if ((b_offset = (offset & 3))) {
7757 /* adjustments to start on required 4 byte boundary */
7758 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7761 start = cpu_to_le32(start);
7770 /* adjustments to end on required 4 byte boundary */
7772 len = (len + 3) & ~3;
7773 ret = tg3_nvram_read(tp, offset+len-4, &end);
7776 end = cpu_to_le32(end);
7780 if (b_offset || odd_len) {
7781 buf = kmalloc(len, GFP_KERNEL);
/* splice preserved head word, preserved tail word, then user payload */
7785 memcpy(buf, &start, 4);
7787 memcpy(buf+len-4, &end, 4);
7788 memcpy(buf + b_offset, data, eeprom->len);
7791 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool .get_settings: report supported/advertised modes, port type and
 * (when the interface is up) the negotiated speed/duplex from link_config.
 * Serdes devices advertise FIBRE only; 10/100-only chips drop gigabit modes.
 */
7799 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7801 struct tg3 *tp = netdev_priv(dev);
7803 cmd->supported = (SUPPORTED_Autoneg);
7805 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7806 cmd->supported |= (SUPPORTED_1000baseT_Half |
7807 SUPPORTED_1000baseT_Full);
7809 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7810 cmd->supported |= (SUPPORTED_100baseT_Half |
7811 SUPPORTED_100baseT_Full |
7812 SUPPORTED_10baseT_Half |
7813 SUPPORTED_10baseT_Full |
7815 cmd->port = PORT_TP;
7817 cmd->supported |= SUPPORTED_FIBRE;
7818 cmd->port = PORT_FIBRE;
7821 cmd->advertising = tp->link_config.advertising;
7822 if (netif_running(dev)) {
7823 cmd->speed = tp->link_config.active_speed;
7824 cmd->duplex = tp->link_config.active_duplex;
7826 cmd->phy_address = PHY_ADDR;
7827 cmd->transceiver = 0;
7828 cmd->autoneg = tp->link_config.autoneg;
/* ethtool .set_settings: validate the requested autoneg/speed/duplex against
 * the hardware (fiber is 1000 only; copper cannot force 1000; 10/100-only
 * parts cannot do 1000 at all), then store it in link_config and kick the
 * PHY if the interface is running.
 */
7834 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7836 struct tg3 *tp = netdev_priv(dev);
7838 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7839 /* These are the only valid advertisement bits allowed. */
7840 if (cmd->autoneg == AUTONEG_ENABLE &&
7841 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7842 ADVERTISED_1000baseT_Full |
7843 ADVERTISED_Autoneg |
7846 /* Fiber can only do SPEED_1000. */
7847 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7848 (cmd->speed != SPEED_1000))
7850 /* Copper cannot force SPEED_1000. */
7851 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7852 (cmd->speed == SPEED_1000))
/* NOTE(review): TG3_FLAG_10_100_ONLY is a tg3_flags bit but is tested
 * against tg3_flags2 here — looks like a flags-word mixup; confirm against
 * the full source before changing.
 */
7854 else if ((cmd->speed == SPEED_1000) &&
7855 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7858 tg3_full_lock(tp, 0);
7860 tp->link_config.autoneg = cmd->autoneg;
7861 if (cmd->autoneg == AUTONEG_ENABLE) {
/* autoneg: record the advert mask, invalidate forced speed/duplex */
7862 tp->link_config.advertising = cmd->advertising;
7863 tp->link_config.speed = SPEED_INVALID;
7864 tp->link_config.duplex = DUPLEX_INVALID;
7866 tp->link_config.advertising = 0;
7867 tp->link_config.speed = cmd->speed;
7868 tp->link_config.duplex = cmd->duplex;
7871 if (netif_running(dev))
7872 tg3_setup_phy(tp, 1);
7874 tg3_full_unlock(tp);
/* ethtool .get_drvinfo: fill in driver name/version, firmware version and
 * PCI bus address.  NOTE(review): plain strcpy into the fixed-size ethtool
 * fields relies on the sources being short enough — modern kernels use
 * strlcpy here; confirm field widths before changing.
 */
7879 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7881 struct tg3 *tp = netdev_priv(dev);
7883 strcpy(info->driver, DRV_MODULE_NAME);
7884 strcpy(info->version, DRV_MODULE_VERSION);
7885 strcpy(info->fw_version, tp->fw_ver);
7886 strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool .get_wol: only magic-packet wake is supported; report it enabled
 * when the WOL flag is set.  No SecureOn password support (sopass zeroed).
 */
7889 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7891 struct tg3 *tp = netdev_priv(dev);
7893 wol->supported = WAKE_MAGIC;
7895 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7896 wol->wolopts = WAKE_MAGIC;
7897 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool .set_wol: accept only WAKE_MAGIC; reject it on serdes parts that
 * lack WOL capability.  The flag update is protected by tp->lock.
 */
7900 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7902 struct tg3 *tp = netdev_priv(dev);
7904 if (wol->wolopts & ~WAKE_MAGIC)
7906 if ((wol->wolopts & WAKE_MAGIC) &&
7907 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7908 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7911 spin_lock_bh(&tp->lock);
7912 if (wol->wolopts & WAKE_MAGIC)
7913 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7915 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7916 spin_unlock_bh(&tp->lock);
/* ethtool .get_msglevel: return the driver's message-enable bitmask. */
7921 static u32 tg3_get_msglevel(struct net_device *dev)
7923 struct tg3 *tp = netdev_priv(dev);
7924 return tp->msg_enable;
/* ethtool .set_msglevel: store the new message-enable bitmask. */
7927 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7929 struct tg3 *tp = netdev_priv(dev);
7930 tp->msg_enable = value;
#if TG3_TSO_SUPPORT != 0
/* ethtool .set_tso: reject enabling TSO on chips without the capability;
 * on HW_TSO_2 parts also toggle TSO6 alongside, then defer to the generic
 * ethtool helper for the NETIF_F_TSO bit itself.
 */
7934 static int tg3_set_tso(struct net_device *dev, u32 value)
7936 struct tg3 *tp = netdev_priv(dev);
7938 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7943 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
7945 dev->features |= NETIF_F_TSO6;
7947 dev->features &= ~NETIF_F_TSO6;
7949 return ethtool_op_set_tso(dev, value);
/* ethtool .nway_reset: restart PHY autonegotiation.  Only valid when the
 * interface is up and the PHY is not a serdes device.
 */
7953 static int tg3_nway_reset(struct net_device *dev)
7955 struct tg3 *tp = netdev_priv(dev);
7959 if (!netif_running(dev))
7962 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7965 spin_lock_bh(&tp->lock);
/* BMCR is read twice; the first read's value is discarded — presumably to
 * flush a latched status before the real read.  TODO(review): confirm. */
7967 tg3_readphy(tp, MII_BMCR, &bmcr);
7968 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7969 ((bmcr & BMCR_ANENABLE) ||
7970 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7971 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7975 spin_unlock_bh(&tp->lock);
/* ethtool .get_ringparam: report hardware maxima and current RX/TX/jumbo
 * ring sizes.  Jumbo values are only reported when the jumbo ring is on.
 */
7980 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7982 struct tg3 *tp = netdev_priv(dev);
7984 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7985 ering->rx_mini_max_pending = 0;
7986 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7987 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7989 ering->rx_jumbo_max_pending = 0;
7991 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7993 ering->rx_pending = tp->rx_pending;
7994 ering->rx_mini_pending = 0;
7995 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7996 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7998 ering->rx_jumbo_pending = 0;
8000 ering->tx_pending = tp->tx_pending;
/* ethtool .set_ringparam: range-check the requested ring sizes, clamp RX to
 * 63 on MAX_RXPEND_64 chips, then halt/restart the hardware under the full
 * lock so the new sizes take effect.
 */
8003 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8005 struct tg3 *tp = netdev_priv(dev);
8006 int irq_sync = 0, err = 0;
8008 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8009 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8010 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
8013 if (netif_running(dev)) {
8018 tg3_full_lock(tp, irq_sync);
8020 tp->rx_pending = ering->rx_pending;
8022 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8023 tp->rx_pending > 63)
8024 tp->rx_pending = 63;
8025 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8026 tp->tx_pending = ering->tx_pending;
8028 if (netif_running(dev)) {
/* full chip reset so the rings are rebuilt with the new sizes */
8029 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8030 err = tg3_restart_hw(tp, 1);
8032 tg3_netif_start(tp);
8035 tg3_full_unlock(tp);
/* ethtool .get_pauseparam: report flow-control autoneg and RX/TX pause
 * state from the driver flag bits.
 */
8040 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8042 struct tg3 *tp = netdev_priv(dev);
8044 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8045 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8046 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
/* ethtool .set_pauseparam: update the pause-autoneg and RX/TX pause flag
 * bits under the full lock, then halt/restart the hardware (if running) so
 * the MAC picks up the new flow-control configuration.
 */
8049 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8051 struct tg3 *tp = netdev_priv(dev);
8052 int irq_sync = 0, err = 0;
8054 if (netif_running(dev)) {
8059 tg3_full_lock(tp, irq_sync);
8061 if (epause->autoneg)
8062 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8064 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8065 if (epause->rx_pause)
8066 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8068 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8069 if (epause->tx_pause)
8070 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8072 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8074 if (netif_running(dev)) {
8075 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8076 err = tg3_restart_hw(tp, 1);
8078 tg3_netif_start(tp);
8081 tg3_full_unlock(tp);
/* ethtool .get_rx_csum: report whether RX checksum offload is enabled. */
8086 static u32 tg3_get_rx_csum(struct net_device *dev)
8088 struct tg3 *tp = netdev_priv(dev);
8089 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* ethtool .set_rx_csum: toggle the RX-checksum flag under tp->lock; chips
 * with broken checksum hardware reject the request up front.
 */
8092 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8094 struct tg3 *tp = netdev_priv(dev);
8096 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8102 spin_lock_bh(&tp->lock);
8104 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8106 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8107 spin_unlock_bh(&tp->lock);
/* ethtool .set_tx_csum: reject on broken-checksum chips; 5755/5787 use the
 * HW_CSUM variant (IPv6-capable), everything else the plain TX csum helper.
 */
8112 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8114 struct tg3 *tp = netdev_priv(dev);
8116 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8122 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8123 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8124 ethtool_op_set_tx_hw_csum(dev, data);
8126 ethtool_op_set_tx_csum(dev, data);
/* ethtool .get_stats_count: number of entries in ethtool_stats_keys. */
8131 static int tg3_get_stats_count (struct net_device *dev)
8133 return TG3_NUM_STATS;
/* ethtool .self_test_count: number of entries in ethtool_test_keys. */
8136 static int tg3_get_test_count (struct net_device *dev)
8138 return TG3_NUM_TEST;
8141 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8143 switch (stringset) {
8145 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
8148 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
8151 WARN_ON(1); /* we need a WARN() */
/* ethtool .phys_id: blink the port LEDs to identify the adapter.  Alternates
 * every 500ms between "all LEDs forced on + traffic blink" and "all off"
 * for 'data' seconds, then restores the saved LED control value.
 */
8156 static int tg3_phys_id(struct net_device *dev, u32 data)
8158 struct tg3 *tp = netdev_priv(dev);
8161 if (!netif_running(tp->dev))
/* two LED states per second of requested blink time */
8167 for (i = 0; i < (data * 2); i++) {
8169 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8170 LED_CTRL_1000MBPS_ON |
8171 LED_CTRL_100MBPS_ON |
8172 LED_CTRL_10MBPS_ON |
8173 LED_CTRL_TRAFFIC_OVERRIDE |
8174 LED_CTRL_TRAFFIC_BLINK |
8175 LED_CTRL_TRAFFIC_LED);
8178 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8179 LED_CTRL_TRAFFIC_OVERRIDE);
/* abort early if the sleeping task catches a signal */
8181 if (msleep_interruptible(500))
8184 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* ethtool .get_ethtool_stats: snapshot the accumulated hardware statistics
 * (refreshed by tg3_get_estats) into the caller's u64 array.
 */
8188 static void tg3_get_ethtool_stats (struct net_device *dev,
8189 struct ethtool_stats *estats, u64 *tmp_stats)
8191 struct tg3 *tp = netdev_priv(dev);
8192 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8195 #define NVRAM_TEST_SIZE 0x100
8196 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
/* Self-test: verify NVRAM integrity.  Determines the image format from the
 * magic word (standard TG3 image vs. 0xa5 selfboot formats), reads the
 * relevant region into a temp buffer, then checks either a simple byte
 * checksum (selfboot) or the two CRC32s of a standard image.
 */
8198 static int tg3_test_nvram(struct tg3 *tp)
8200 u32 *buf, csum, magic;
8201 int i, j, err = 0, size;
8203 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8206 if (magic == TG3_EEPROM_MAGIC)
8207 size = NVRAM_TEST_SIZE;
8208 else if ((magic & 0xff000000) == 0xa5000000) {
8209 if ((magic & 0xe00000) == 0x200000)
8210 size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8216 buf = kmalloc(size, GFP_KERNEL);
8221 for (i = 0, j = 0; i < size; i += 4, j++) {
8224 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8226 buf[j] = cpu_to_le32(val);
8231 /* Selfboot format */
/* NOTE(review): cpu_to_be32/cpu_to_le32 on comparisons here encode the
 * on-flash byte order of this era's format; verify on big-endian. */
8232 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8233 u8 *buf8 = (u8 *) buf, csum8 = 0;
8235 for (i = 0; i < size; i++)
8247 /* Bootstrap checksum at offset 0x10 */
8248 csum = calc_crc((unsigned char *) buf, 0x10);
8249 if(csum != cpu_to_le32(buf[0x10/4]))
8252 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8253 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8254 if (csum != cpu_to_le32(buf[0xfc/4]))
8264 #define TG3_SERDES_TIMEOUT_SEC 2
8265 #define TG3_COPPER_TIMEOUT_SEC 6
/* Self-test: wait up to TG3_SERDES_TIMEOUT_SEC / TG3_COPPER_TIMEOUT_SEC
 * seconds for carrier, polling once per second.  Requires the interface up.
 */
8267 static int tg3_test_link(struct tg3 *tp)
8271 if (!netif_running(tp->dev))
8274 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8275 max = TG3_SERDES_TIMEOUT_SEC;
8277 max = TG3_COPPER_TIMEOUT_SEC;
8279 for (i = 0; i < max; i++) {
8280 if (netif_carrier_ok(tp->dev))
8283 if (msleep_interruptible(1000))
8290 /* Only test the commonly used registers */
/* Self-test: walk a table of MAC/ring/host-coalescing/buffer-manager/mailbox
 * registers.  Each entry carries chip-applicability flags plus a read-only
 * mask and a read/write mask.  For each applicable register: save the
 * original value, write 0 and then (read_mask|write_mask), checking that
 * read-only bits never change and read/write bits take both values, then
 * restore the saved value.  Returns nonzero (after logging the offset) on
 * the first mismatch.
 */
8291 static int tg3_test_registers(struct tg3 *tp)
8294 u32 offset, read_mask, write_mask, val, save_val, read_val;
8298 #define TG3_FL_5705 0x1
8299 #define TG3_FL_NOT_5705 0x2
8300 #define TG3_FL_NOT_5788 0x4
8304 /* MAC Control Registers */
8305 { MAC_MODE, TG3_FL_NOT_5705,
8306 0x00000000, 0x00ef6f8c },
8307 { MAC_MODE, TG3_FL_5705,
8308 0x00000000, 0x01ef6b8c },
8309 { MAC_STATUS, TG3_FL_NOT_5705,
8310 0x03800107, 0x00000000 },
8311 { MAC_STATUS, TG3_FL_5705,
8312 0x03800100, 0x00000000 },
8313 { MAC_ADDR_0_HIGH, 0x0000,
8314 0x00000000, 0x0000ffff },
8315 { MAC_ADDR_0_LOW, 0x0000,
8316 0x00000000, 0xffffffff },
8317 { MAC_RX_MTU_SIZE, 0x0000,
8318 0x00000000, 0x0000ffff },
8319 { MAC_TX_MODE, 0x0000,
8320 0x00000000, 0x00000070 },
8321 { MAC_TX_LENGTHS, 0x0000,
8322 0x00000000, 0x00003fff },
8323 { MAC_RX_MODE, TG3_FL_NOT_5705,
8324 0x00000000, 0x000007fc },
8325 { MAC_RX_MODE, TG3_FL_5705,
8326 0x00000000, 0x000007dc },
8327 { MAC_HASH_REG_0, 0x0000,
8328 0x00000000, 0xffffffff },
8329 { MAC_HASH_REG_1, 0x0000,
8330 0x00000000, 0xffffffff },
8331 { MAC_HASH_REG_2, 0x0000,
8332 0x00000000, 0xffffffff },
8333 { MAC_HASH_REG_3, 0x0000,
8334 0x00000000, 0xffffffff },
8336 /* Receive Data and Receive BD Initiator Control Registers. */
8337 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8338 0x00000000, 0xffffffff },
8339 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8340 0x00000000, 0xffffffff },
8341 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8342 0x00000000, 0x00000003 },
8343 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8344 0x00000000, 0xffffffff },
8345 { RCVDBDI_STD_BD+0, 0x0000,
8346 0x00000000, 0xffffffff },
8347 { RCVDBDI_STD_BD+4, 0x0000,
8348 0x00000000, 0xffffffff },
8349 { RCVDBDI_STD_BD+8, 0x0000,
8350 0x00000000, 0xffff0002 },
8351 { RCVDBDI_STD_BD+0xc, 0x0000,
8352 0x00000000, 0xffffffff },
8354 /* Receive BD Initiator Control Registers. */
8355 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8356 0x00000000, 0xffffffff },
8357 { RCVBDI_STD_THRESH, TG3_FL_5705,
8358 0x00000000, 0x000003ff },
8359 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8360 0x00000000, 0xffffffff },
8362 /* Host Coalescing Control Registers. */
8363 { HOSTCC_MODE, TG3_FL_NOT_5705,
8364 0x00000000, 0x00000004 },
8365 { HOSTCC_MODE, TG3_FL_5705,
8366 0x00000000, 0x000000f6 },
8367 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8368 0x00000000, 0xffffffff },
8369 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8370 0x00000000, 0x000003ff },
8371 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8372 0x00000000, 0xffffffff },
8373 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8374 0x00000000, 0x000003ff },
8375 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8376 0x00000000, 0xffffffff },
8377 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8378 0x00000000, 0x000000ff },
8379 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8380 0x00000000, 0xffffffff },
8381 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8382 0x00000000, 0x000000ff },
8383 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8384 0x00000000, 0xffffffff },
8385 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8386 0x00000000, 0xffffffff },
8387 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8388 0x00000000, 0xffffffff },
8389 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8390 0x00000000, 0x000000ff },
8391 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8392 0x00000000, 0xffffffff },
8393 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8394 0x00000000, 0x000000ff },
8395 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8396 0x00000000, 0xffffffff },
8397 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8398 0x00000000, 0xffffffff },
8399 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8400 0x00000000, 0xffffffff },
8401 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8402 0x00000000, 0xffffffff },
8403 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8404 0x00000000, 0xffffffff },
8405 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8406 0xffffffff, 0x00000000 },
8407 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8408 0xffffffff, 0x00000000 },
8410 /* Buffer Manager Control Registers. */
8411 { BUFMGR_MB_POOL_ADDR, 0x0000,
8412 0x00000000, 0x007fff80 },
8413 { BUFMGR_MB_POOL_SIZE, 0x0000,
8414 0x00000000, 0x007fffff },
8415 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8416 0x00000000, 0x0000003f },
8417 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8418 0x00000000, 0x000001ff },
8419 { BUFMGR_MB_HIGH_WATER, 0x0000,
8420 0x00000000, 0x000001ff },
8421 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8422 0xffffffff, 0x00000000 },
8423 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8424 0xffffffff, 0x00000000 },
8426 /* Mailbox Registers */
8427 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8428 0x00000000, 0x000001ff },
8429 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8430 0x00000000, 0x000001ff },
8431 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8432 0x00000000, 0x000007ff },
8433 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8434 0x00000000, 0x000001ff },
/* sentinel terminating the table */
8436 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8439 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8444 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8445 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8448 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8451 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8452 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8455 offset = (u32) reg_tbl[i].offset;
8456 read_mask = reg_tbl[i].read_mask;
8457 write_mask = reg_tbl[i].write_mask;
8459 /* Save the original register content */
8460 save_val = tr32(offset);
8462 /* Determine the read-only value. */
8463 read_val = save_val & read_mask;
8465 /* Write zero to the register, then make sure the read-only bits
8466 * are not changed and the read/write bits are all zeros.
8472 /* Test the read-only and read/write bits. */
8473 if (((val & read_mask) != read_val) || (val & write_mask))
8476 /* Write ones to all the bits defined by RdMask and WrMask, then
8477 * make sure the read-only bits are not changed and the
8478 * read/write bits are all ones.
8480 tw32(offset, read_mask | write_mask);
8484 /* Test the read-only bits. */
8485 if ((val & read_mask) != read_val)
8488 /* Test the read/write bits. */
8489 if ((val & write_mask) != write_mask)
8492 tw32(offset, save_val);
/* error path: restore the register before failing the test */
8498 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8499 tw32(offset, save_val);
/* Self-test helper: write each of three patterns (zeros, ones, alternating)
 * to every word of NIC-internal memory in [offset, offset+len) and read it
 * back, failing on the first mismatch.
 */
8503 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8505 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8509 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8510 for (j = 0; j < len; j += 4) {
8513 tg3_write_mem(tp, offset + j, test_pattern[i]);
8514 tg3_read_mem(tp, offset + j, &val);
8515 if (val != test_pattern[i])
/* Self-test: run tg3_do_mem_test over the internal-memory map appropriate
 * for the chip family (570x, 5705-class, or 5755/5787).  Each table is a
 * {offset, length} list terminated by offset 0xffffffff.
 */
8522 static int tg3_test_memory(struct tg3 *tp)
8524 static struct mem_entry {
8527 } mem_tbl_570x[] = {
8528 { 0x00000000, 0x00b50},
8529 { 0x00002000, 0x1c000},
8530 { 0xffffffff, 0x00000}
8531 }, mem_tbl_5705[] = {
8532 { 0x00000100, 0x0000c},
8533 { 0x00000200, 0x00008},
8534 { 0x00004000, 0x00800},
8535 { 0x00006000, 0x01000},
8536 { 0x00008000, 0x02000},
8537 { 0x00010000, 0x0e000},
8538 { 0xffffffff, 0x00000}
8539 }, mem_tbl_5755[] = {
8540 { 0x00000200, 0x00008},
8541 { 0x00004000, 0x00800},
8542 { 0x00006000, 0x00800},
8543 { 0x00008000, 0x02000},
8544 { 0x00010000, 0x0c000},
8545 { 0xffffffff, 0x00000}
8547 struct mem_entry *mem_tbl;
8551 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8552 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8554 mem_tbl = mem_tbl_5755;
8556 mem_tbl = mem_tbl_5705;
8558 mem_tbl = mem_tbl_570x;
8560 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8561 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8562 mem_tbl[i].len)) != 0)
8569 #define TG3_MAC_LOOPBACK 0
8570 #define TG3_PHY_LOOPBACK 1
/* Self-test: send one frame through MAC-internal or PHY loopback and check
 * it comes back on the RX ring intact.  Sequence: configure loopback mode,
 * build a test frame (dest = own MAC, payload = counting bytes), DMA-map it,
 * post it to the TX ring, poll the status block for TX completion and the
 * looped-back RX packet, then verify descriptor fields, length and payload.
 * NOTE(review): statement order here mirrors a hardware programming
 * sequence — do not reorder.
 */
8572 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8574 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8576 struct sk_buff *skb, *rx_skb;
8579 int num_pkts, tx_len, rx_len, i, err;
8580 struct tg3_rx_buffer_desc *desc;
8582 if (loopback_mode == TG3_MAC_LOOPBACK) {
8583 /* HW errata - mac loopback fails in some cases on 5780.
8584 * Normal traffic and PHY loopback are not affected by
8587 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8590 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8591 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8592 MAC_MODE_PORT_MODE_GMII;
8593 tw32(MAC_MODE, mac_mode);
8594 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8595 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8598 /* reset to prevent losing 1st rx packet intermittently */
8599 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8600 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8602 tw32_f(MAC_RX_MODE, tp->rx_mode);
8604 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8605 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8606 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8607 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8608 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8609 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8611 tw32(MAC_MODE, mac_mode);
8619 skb = netdev_alloc_skb(tp->dev, tx_len);
/* frame: own MAC as destination, zeroed src/type, counting payload */
8623 tx_data = skb_put(skb, tx_len);
8624 memcpy(tx_data, tp->dev->dev_addr, 6);
8625 memset(tx_data + 6, 0x0, 8);
8627 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8629 for (i = 14; i < tx_len; i++)
8630 tx_data[i] = (u8) (i & 0xff);
8632 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8634 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8639 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8643 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8648 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8650 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
/* poll up to 10 times for TX consumption and the looped-back RX frame */
8654 for (i = 0; i < 10; i++) {
8655 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8660 tx_idx = tp->hw_status->idx[0].tx_consumer;
8661 rx_idx = tp->hw_status->idx[0].rx_producer;
8662 if ((tx_idx == tp->tx_prod) &&
8663 (rx_idx == (rx_start_idx + num_pkts)))
8667 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8670 if (tx_idx != tp->tx_prod)
8673 if (rx_idx != rx_start_idx + num_pkts)
8676 desc = &tp->rx_rcb[rx_start_idx];
8677 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8678 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8679 if (opaque_key != RXD_OPAQUE_RING_STD)
8682 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8683 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
/* strip the 4-byte FCS the MAC appends before comparing lengths */
8686 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8687 if (rx_len != tx_len)
8690 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8692 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8693 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8695 for (i = 14; i < tx_len; i++) {
8696 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8701 /* tg3_free_rings will unmap and free the rx_skb */
8706 #define TG3_MAC_LOOPBACK_FAILED 1
8707 #define TG3_PHY_LOOPBACK_FAILED 2
8708 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8709 TG3_PHY_LOOPBACK_FAILED)
/* Self-test: reset the chip, then run MAC loopback always and PHY loopback
 * on non-serdes parts.  Returns a bitmask of TG3_*_LOOPBACK_FAILED.
 */
8711 static int tg3_test_loopback(struct tg3 *tp)
8715 if (!netif_running(tp->dev))
8716 return TG3_LOOPBACK_FAILED;
8718 err = tg3_reset_hw(tp, 1);
8720 return TG3_LOOPBACK_FAILED;
8722 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8723 err |= TG3_MAC_LOOPBACK_FAILED;
8724 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8725 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8726 err |= TG3_PHY_LOOPBACK_FAILED;
/* ethtool .self_test entry point.  Online tests: NVRAM and link.  Offline
 * (ETH_TEST_FL_OFFLINE): additionally halt the chip and run the register,
 * memory, loopback and interrupt tests, then restart the hardware.  Each
 * failing test sets its data[] slot and ETH_TEST_FL_FAILED.  Temporarily
 * raises a low-power PHY to D0 for the duration.
 */
8732 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8735 struct tg3 *tp = netdev_priv(dev);
8737 if (tp->link_config.phy_is_low_power)
8738 tg3_set_power_state(tp, PCI_D0);
8740 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8742 if (tg3_test_nvram(tp) != 0) {
8743 etest->flags |= ETH_TEST_FL_FAILED;
8746 if (tg3_test_link(tp) != 0) {
8747 etest->flags |= ETH_TEST_FL_FAILED;
8750 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8751 int err, irq_sync = 0;
8753 if (netif_running(dev)) {
8758 tg3_full_lock(tp, irq_sync);
/* quiesce the chip and both on-chip CPUs before poking registers */
8760 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8761 err = tg3_nvram_lock(tp);
8762 tg3_halt_cpu(tp, RX_CPU_BASE);
8763 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8764 tg3_halt_cpu(tp, TX_CPU_BASE);
8766 tg3_nvram_unlock(tp);
8768 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8771 if (tg3_test_registers(tp) != 0) {
8772 etest->flags |= ETH_TEST_FL_FAILED;
8775 if (tg3_test_memory(tp) != 0) {
8776 etest->flags |= ETH_TEST_FL_FAILED;
8779 if ((data[4] = tg3_test_loopback(tp)) != 0)
8780 etest->flags |= ETH_TEST_FL_FAILED;
8782 tg3_full_unlock(tp);
/* interrupt test must run unlocked — it waits for an IRQ */
8784 if (tg3_test_interrupt(tp) != 0) {
8785 etest->flags |= ETH_TEST_FL_FAILED;
8789 tg3_full_lock(tp, 0);
8791 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8792 if (netif_running(dev)) {
8793 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8794 if (!tg3_restart_hw(tp, 1))
8795 tg3_netif_start(tp);
8798 tg3_full_unlock(tp);
8800 if (tp->link_config.phy_is_low_power)
8801 tg3_set_power_state(tp, PCI_D3hot);
/* net_device ioctl handler: implements the MII ioctls (SIOCGMIIPHY/
 * SIOCGMIIREG/SIOCSMIIREG) by proxying to tg3_readphy/tg3_writephy under
 * tp->lock.  Serdes devices have no MDIO-accessible PHY and fall through;
 * writes additionally require CAP_NET_ADMIN.
 */
8805 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8807 struct mii_ioctl_data *data = if_mii(ifr);
8808 struct tg3 *tp = netdev_priv(dev);
8813 data->phy_id = PHY_ADDR;
8819 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8820 break; /* We have no PHY */
8822 if (tp->link_config.phy_is_low_power)
8825 spin_lock_bh(&tp->lock);
8826 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8827 spin_unlock_bh(&tp->lock);
8829 data->val_out = mii_regval;
8835 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8836 break; /* We have no PHY */
8838 if (!capable(CAP_NET_ADMIN))
8841 if (tp->link_config.phy_is_low_power)
8844 spin_lock_bh(&tp->lock);
8845 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8846 spin_unlock_bh(&tp->lock);
#if TG3_VLAN_TAG_USED
/* VLAN: record the new vlan_group and refresh the RX-mode register's
 * KEEP_VLAN_TAG bit, quiescing the device around the change.
 */
8858 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8860 struct tg3 *tp = netdev_priv(dev);
8862 if (netif_running(dev))
8865 tg3_full_lock(tp, 0);
8869 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8870 __tg3_set_rx_mode(dev);
8872 tg3_full_unlock(tp);
8874 if (netif_running(dev))
8875 tg3_netif_start(tp);
/* VLAN: drop the per-VID device pointer from the vlan_group under the full
 * lock, quiescing the device around the change.
 */
8878 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8880 struct tg3 *tp = netdev_priv(dev);
8882 if (netif_running(dev))
8885 tg3_full_lock(tp, 0);
8887 tp->vlgrp->vlan_devices[vid] = NULL;
8888 tg3_full_unlock(tp);
8890 if (netif_running(dev))
8891 tg3_netif_start(tp);
/* ethtool .get_coalesce: copy the cached coalescing parameters. */
8895 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8897 struct tg3 *tp = netdev_priv(dev);
8899 memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool .set_coalesce: range-check the requested interrupt-coalescing
 * parameters (the *_irq and stats limits only exist on pre-5705 chips, so
 * their maxima are zero on 5705+), reject combinations that would disable
 * RX or TX interrupts entirely, copy the supported fields into tp->coal,
 * and program the hardware if the interface is up.
 */
8903 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8905 struct tg3 *tp = netdev_priv(dev);
8906 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8907 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8909 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8910 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8911 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8912 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8913 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8916 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8917 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8918 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8919 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8920 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8921 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8922 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8923 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8924 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8925 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8928 /* No rx interrupts will be generated if both are zero */
8929 if ((ec->rx_coalesce_usecs == 0) &&
8930 (ec->rx_max_coalesced_frames == 0))
8933 /* No tx interrupts will be generated if both are zero */
8934 if ((ec->tx_coalesce_usecs == 0) &&
8935 (ec->tx_max_coalesced_frames == 0))
8938 /* Only copy relevant parameters, ignore all others. */
8939 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8940 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8941 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8942 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8943 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8944 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8945 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8946 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8947 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8949 if (netif_running(dev)) {
8950 tg3_full_lock(tp, 0);
8951 __tg3_set_coalesce(tp, &tp->coal);
8952 tg3_full_unlock(tp);
/* ethtool operations vector wired into the net_device at probe time.
 * TSO entries are conditional on compile-time TSO support.
 */
8957 static struct ethtool_ops tg3_ethtool_ops = {
8958 .get_settings = tg3_get_settings,
8959 .set_settings = tg3_set_settings,
8960 .get_drvinfo = tg3_get_drvinfo,
8961 .get_regs_len = tg3_get_regs_len,
8962 .get_regs = tg3_get_regs,
8963 .get_wol = tg3_get_wol,
8964 .set_wol = tg3_set_wol,
8965 .get_msglevel = tg3_get_msglevel,
8966 .set_msglevel = tg3_set_msglevel,
8967 .nway_reset = tg3_nway_reset,
8968 .get_link = ethtool_op_get_link,
8969 .get_eeprom_len = tg3_get_eeprom_len,
8970 .get_eeprom = tg3_get_eeprom,
8971 .set_eeprom = tg3_set_eeprom,
8972 .get_ringparam = tg3_get_ringparam,
8973 .set_ringparam = tg3_set_ringparam,
8974 .get_pauseparam = tg3_get_pauseparam,
8975 .set_pauseparam = tg3_set_pauseparam,
8976 .get_rx_csum = tg3_get_rx_csum,
8977 .set_rx_csum = tg3_set_rx_csum,
8978 .get_tx_csum = ethtool_op_get_tx_csum,
8979 .set_tx_csum = tg3_set_tx_csum,
8980 .get_sg = ethtool_op_get_sg,
8981 .set_sg = ethtool_op_set_sg,
8982 #if TG3_TSO_SUPPORT != 0
8983 .get_tso = ethtool_op_get_tso,
8984 .set_tso = tg3_set_tso,
8986 .self_test_count = tg3_get_test_count,
8987 .self_test = tg3_self_test,
8988 .get_strings = tg3_get_strings,
8989 .phys_id = tg3_phys_id,
8990 .get_stats_count = tg3_get_stats_count,
8991 .get_ethtool_stats = tg3_get_ethtool_stats,
8992 .get_coalesce = tg3_get_coalesce,
8993 .set_coalesce = tg3_set_coalesce,
8994 .get_perm_addr = ethtool_op_get_perm_addr,
/* Probe: determine EEPROM capacity by reading words at doubling offsets
 * until the magic signature reappears — i.e. addressing has wrapped — and
 * record that offset as tp->nvram_size.  Bails out (leaving the default
 * EEPROM_CHIP_SIZE) when the magic is unrecognized.
 */
8997 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8999 u32 cursize, val, magic;
9001 tp->nvram_size = EEPROM_CHIP_SIZE;
9003 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9006 if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
9010 * Size the chip by reading offsets at increasing powers of two.
9011 * When we encounter our validation signature, we know the addressing
9012 * has wrapped around, and thus have our chip size.
9016 while (cursize < tp->nvram_size) {
9017 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9026 tp->nvram_size = cursize;
/* Probe: determine NVRAM size.  Standard images store a size hint (KB) in
 * the high half of the word at 0xf0; selfboot images fall back to the
 * wrap-around sizing in tg3_get_eeprom_size; otherwise default to 128KB.
 */
9029 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9033 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9036 /* Selfboot format */
9037 if (val != TG3_EEPROM_MAGIC) {
9038 tg3_get_eeprom_size(tp);
9042 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9044 tp->nvram_size = (val >> 16) * 1024;
9048 tp->nvram_size = 0x20000;
/* Probe: decode NVRAM_CFG1 to identify the flash/EEPROM part.  Sets the
 * FLASH flag, disables compat bypass, and for 5750/5780-class chips maps
 * the vendor field to JEDEC id + page size + buffered flag; other chips get
 * the Atmel buffered-flash defaults.
 */
9051 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9055 nvcfg1 = tr32(NVRAM_CFG1);
9056 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9057 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9060 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9061 tw32(NVRAM_CFG1, nvcfg1);
9064 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9065 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9066 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9067 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9068 tp->nvram_jedecnum = JEDEC_ATMEL;
9069 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9070 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9072 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9073 tp->nvram_jedecnum = JEDEC_ATMEL;
9074 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9076 case FLASH_VENDOR_ATMEL_EEPROM:
9077 tp->nvram_jedecnum = JEDEC_ATMEL;
9078 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9079 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9081 case FLASH_VENDOR_ST:
9082 tp->nvram_jedecnum = JEDEC_ST;
9083 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9084 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9086 case FLASH_VENDOR_SAIFUN:
9087 tp->nvram_jedecnum = JEDEC_SAIFUN;
9088 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9090 case FLASH_VENDOR_SST_SMALL:
9091 case FLASH_VENDOR_SST_LARGE:
9092 tp->nvram_jedecnum = JEDEC_SST;
9093 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* non-5750/5780 chips: assume Atmel buffered flash */
9098 tp->nvram_jedecnum = JEDEC_ATMEL;
9099 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9100 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9104 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9108 nvcfg1 = tr32(NVRAM_CFG1);
9110 /* NVRAM protection for TPM */
9111 if (nvcfg1 & (1 << 27))
9112 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9114 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9115 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9116 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9117 tp->nvram_jedecnum = JEDEC_ATMEL;
9118 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9120 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9121 tp->nvram_jedecnum = JEDEC_ATMEL;
9122 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9123 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9125 case FLASH_5752VENDOR_ST_M45PE10:
9126 case FLASH_5752VENDOR_ST_M45PE20:
9127 case FLASH_5752VENDOR_ST_M45PE40:
9128 tp->nvram_jedecnum = JEDEC_ST;
9129 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9130 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9134 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9135 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9136 case FLASH_5752PAGE_SIZE_256:
9137 tp->nvram_pagesize = 256;
9139 case FLASH_5752PAGE_SIZE_512:
9140 tp->nvram_pagesize = 512;
9142 case FLASH_5752PAGE_SIZE_1K:
9143 tp->nvram_pagesize = 1024;
9145 case FLASH_5752PAGE_SIZE_2K:
9146 tp->nvram_pagesize = 2048;
9148 case FLASH_5752PAGE_SIZE_4K:
9149 tp->nvram_pagesize = 4096;
9151 case FLASH_5752PAGE_SIZE_264:
9152 tp->nvram_pagesize = 264;
9157 /* For eeprom, set pagesize to maximum eeprom size */
9158 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9160 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9161 tw32(NVRAM_CFG1, nvcfg1);
9165 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9169 nvcfg1 = tr32(NVRAM_CFG1);
9171 /* NVRAM protection for TPM */
9172 if (nvcfg1 & (1 << 27))
9173 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9175 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9176 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9177 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9178 tp->nvram_jedecnum = JEDEC_ATMEL;
9179 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9180 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9182 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9183 tw32(NVRAM_CFG1, nvcfg1);
9185 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9186 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9187 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9188 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9189 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9190 tp->nvram_jedecnum = JEDEC_ATMEL;
9191 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9192 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9193 tp->nvram_pagesize = 264;
9195 case FLASH_5752VENDOR_ST_M45PE10:
9196 case FLASH_5752VENDOR_ST_M45PE20:
9197 case FLASH_5752VENDOR_ST_M45PE40:
9198 tp->nvram_jedecnum = JEDEC_ST;
9199 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9200 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9201 tp->nvram_pagesize = 256;
9206 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9210 nvcfg1 = tr32(NVRAM_CFG1);
9212 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9213 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9214 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9215 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9216 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9217 tp->nvram_jedecnum = JEDEC_ATMEL;
9218 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9219 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9221 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9222 tw32(NVRAM_CFG1, nvcfg1);
9224 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9225 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9226 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9227 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9228 tp->nvram_jedecnum = JEDEC_ATMEL;
9229 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9230 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9231 tp->nvram_pagesize = 264;
9233 case FLASH_5752VENDOR_ST_M45PE10:
9234 case FLASH_5752VENDOR_ST_M45PE20:
9235 case FLASH_5752VENDOR_ST_M45PE40:
9236 tp->nvram_jedecnum = JEDEC_ST;
9237 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9238 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9239 tp->nvram_pagesize = 256;
9244 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9245 static void __devinit tg3_nvram_init(struct tg3 *tp)
9249 tw32_f(GRC_EEPROM_ADDR,
9250 (EEPROM_ADDR_FSM_RESET |
9251 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9252 EEPROM_ADDR_CLKPERD_SHIFT)));
9254 /* XXX schedule_timeout() ... */
9255 for (j = 0; j < 100; j++)
9258 /* Enable seeprom accesses. */
9259 tw32_f(GRC_LOCAL_CTRL,
9260 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9263 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9264 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9265 tp->tg3_flags |= TG3_FLAG_NVRAM;
9267 if (tg3_nvram_lock(tp)) {
9268 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9269 "tg3_nvram_init failed.\n", tp->dev->name);
9272 tg3_enable_nvram_access(tp);
9274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9275 tg3_get_5752_nvram_info(tp);
9276 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9277 tg3_get_5755_nvram_info(tp);
9278 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9279 tg3_get_5787_nvram_info(tp);
9281 tg3_get_nvram_info(tp);
9283 tg3_get_nvram_size(tp);
9285 tg3_disable_nvram_access(tp);
9286 tg3_nvram_unlock(tp);
9289 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9291 tg3_get_eeprom_size(tp);
9295 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9296 u32 offset, u32 *val)
9301 if (offset > EEPROM_ADDR_ADDR_MASK ||
9305 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9306 EEPROM_ADDR_DEVID_MASK |
9308 tw32(GRC_EEPROM_ADDR,
9310 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9311 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9312 EEPROM_ADDR_ADDR_MASK) |
9313 EEPROM_ADDR_READ | EEPROM_ADDR_START);
9315 for (i = 0; i < 10000; i++) {
9316 tmp = tr32(GRC_EEPROM_ADDR);
9318 if (tmp & EEPROM_ADDR_COMPLETE)
9322 if (!(tmp & EEPROM_ADDR_COMPLETE))
9325 *val = tr32(GRC_EEPROM_DATA);
9329 #define NVRAM_CMD_TIMEOUT 10000
9331 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9335 tw32(NVRAM_CMD, nvram_cmd);
9336 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9338 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9343 if (i == NVRAM_CMD_TIMEOUT) {
9349 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9351 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9352 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9353 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9354 (tp->nvram_jedecnum == JEDEC_ATMEL))
9356 addr = ((addr / tp->nvram_pagesize) <<
9357 ATMEL_AT45DB0X1B_PAGE_POS) +
9358 (addr % tp->nvram_pagesize);
9363 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9365 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9366 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9367 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9368 (tp->nvram_jedecnum == JEDEC_ATMEL))
9370 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9371 tp->nvram_pagesize) +
9372 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9377 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9381 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9382 return tg3_nvram_read_using_eeprom(tp, offset, val);
9384 offset = tg3_nvram_phys_addr(tp, offset);
9386 if (offset > NVRAM_ADDR_MSK)
9389 ret = tg3_nvram_lock(tp);
9393 tg3_enable_nvram_access(tp);
9395 tw32(NVRAM_ADDR, offset);
9396 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9397 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9400 *val = swab32(tr32(NVRAM_RDDATA));
9402 tg3_disable_nvram_access(tp);
9404 tg3_nvram_unlock(tp);
9409 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9414 err = tg3_nvram_read(tp, offset, &tmp);
9419 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9420 u32 offset, u32 len, u8 *buf)
9425 for (i = 0; i < len; i += 4) {
9430 memcpy(&data, buf + i, 4);
9432 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9434 val = tr32(GRC_EEPROM_ADDR);
9435 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9437 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9439 tw32(GRC_EEPROM_ADDR, val |
9440 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9441 (addr & EEPROM_ADDR_ADDR_MASK) |
9445 for (j = 0; j < 10000; j++) {
9446 val = tr32(GRC_EEPROM_ADDR);
9448 if (val & EEPROM_ADDR_COMPLETE)
9452 if (!(val & EEPROM_ADDR_COMPLETE)) {
9461 /* offset and length are dword aligned */
9462 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9466 u32 pagesize = tp->nvram_pagesize;
9467 u32 pagemask = pagesize - 1;
9471 tmp = kmalloc(pagesize, GFP_KERNEL);
9477 u32 phy_addr, page_off, size;
9479 phy_addr = offset & ~pagemask;
9481 for (j = 0; j < pagesize; j += 4) {
9482 if ((ret = tg3_nvram_read(tp, phy_addr + j,
9483 (u32 *) (tmp + j))))
9489 page_off = offset & pagemask;
9496 memcpy(tmp + page_off, buf, size);
9498 offset = offset + (pagesize - page_off);
9500 tg3_enable_nvram_access(tp);
9503 * Before we can erase the flash page, we need
9504 * to issue a special "write enable" command.
9506 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9508 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9511 /* Erase the target page */
9512 tw32(NVRAM_ADDR, phy_addr);
9514 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9515 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9517 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9520 /* Issue another write enable to start the write. */
9521 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9523 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9526 for (j = 0; j < pagesize; j += 4) {
9529 data = *((u32 *) (tmp + j));
9530 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9532 tw32(NVRAM_ADDR, phy_addr + j);
9534 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9538 nvram_cmd |= NVRAM_CMD_FIRST;
9539 else if (j == (pagesize - 4))
9540 nvram_cmd |= NVRAM_CMD_LAST;
9542 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9549 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9550 tg3_nvram_exec_cmd(tp, nvram_cmd);
9557 /* offset and length are dword aligned */
9558 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9563 for (i = 0; i < len; i += 4, offset += 4) {
9564 u32 data, page_off, phy_addr, nvram_cmd;
9566 memcpy(&data, buf + i, 4);
9567 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9569 page_off = offset % tp->nvram_pagesize;
9571 phy_addr = tg3_nvram_phys_addr(tp, offset);
9573 tw32(NVRAM_ADDR, phy_addr);
9575 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9577 if ((page_off == 0) || (i == 0))
9578 nvram_cmd |= NVRAM_CMD_FIRST;
9579 if (page_off == (tp->nvram_pagesize - 4))
9580 nvram_cmd |= NVRAM_CMD_LAST;
9583 nvram_cmd |= NVRAM_CMD_LAST;
9585 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9586 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9587 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9588 (tp->nvram_jedecnum == JEDEC_ST) &&
9589 (nvram_cmd & NVRAM_CMD_FIRST)) {
9591 if ((ret = tg3_nvram_exec_cmd(tp,
9592 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9597 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9598 /* We always do complete word writes to eeprom. */
9599 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9602 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9608 /* offset and length are dword aligned */
9609 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9613 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9614 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9615 ~GRC_LCLCTRL_GPIO_OUTPUT1);
9619 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9620 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9625 ret = tg3_nvram_lock(tp);
9629 tg3_enable_nvram_access(tp);
9630 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9631 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9632 tw32(NVRAM_WRITE1, 0x406);
9634 grc_mode = tr32(GRC_MODE);
9635 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9637 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9638 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9640 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9644 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9648 grc_mode = tr32(GRC_MODE);
9649 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9651 tg3_disable_nvram_access(tp);
9652 tg3_nvram_unlock(tp);
9655 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9656 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9663 struct subsys_tbl_ent {
9664 u16 subsys_vendor, subsys_devid;
9668 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9669 /* Broadcom boards. */
9670 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9671 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9672 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9673 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
9674 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9675 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9676 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
9677 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9678 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9679 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9680 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9683 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9684 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9685 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
9686 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9687 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9690 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9691 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9692 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9693 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9695 /* Compaq boards. */
9696 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9697 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9698 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
9699 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9700 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9703 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9706 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9710 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9711 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9712 tp->pdev->subsystem_vendor) &&
9713 (subsys_id_to_phy_id[i].subsys_devid ==
9714 tp->pdev->subsystem_device))
9715 return &subsys_id_to_phy_id[i];
9720 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9725 /* On some early chips the SRAM cannot be accessed in D3hot state,
9726 * so need make sure we're in D0.
9728 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9729 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9730 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9733 /* Make sure register accesses (indirect or otherwise)
9734 * will function correctly.
9736 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9737 tp->misc_host_ctrl);
9739 /* The memory arbiter has to be enabled in order for SRAM accesses
9740 * to succeed. Normally on powerup the tg3 chip firmware will make
9741 * sure it is enabled, but other entities such as system netboot
9742 * code might disable it.
9744 val = tr32(MEMARB_MODE);
9745 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9747 tp->phy_id = PHY_ID_INVALID;
9748 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9750 /* Assume an onboard device by default. */
9751 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9753 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9754 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9755 u32 nic_cfg, led_cfg;
9756 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9757 int eeprom_phy_serdes = 0;
9759 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9760 tp->nic_sram_data_cfg = nic_cfg;
9762 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9763 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9764 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9765 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9766 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9767 (ver > 0) && (ver < 0x100))
9768 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9770 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9771 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9772 eeprom_phy_serdes = 1;
9774 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9775 if (nic_phy_id != 0) {
9776 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9777 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9779 eeprom_phy_id = (id1 >> 16) << 10;
9780 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9781 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9785 tp->phy_id = eeprom_phy_id;
9786 if (eeprom_phy_serdes) {
9787 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9788 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9790 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9793 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9794 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9795 SHASTA_EXT_LED_MODE_MASK);
9797 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9801 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9802 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9805 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9806 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9809 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9810 tp->led_ctrl = LED_CTRL_MODE_MAC;
9812 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9813 * read on some older 5700/5701 bootcode.
9815 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9817 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9819 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9823 case SHASTA_EXT_LED_SHARED:
9824 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9825 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9826 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9827 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9828 LED_CTRL_MODE_PHY_2);
9831 case SHASTA_EXT_LED_MAC:
9832 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9835 case SHASTA_EXT_LED_COMBO:
9836 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9837 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9838 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9839 LED_CTRL_MODE_PHY_2);
9844 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9846 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9847 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9849 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9850 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9852 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9854 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9855 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9856 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9857 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9859 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9860 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9862 if (cfg2 & (1 << 17))
9863 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9865 /* serdes signal pre-emphasis in register 0x590 set by */
9866 /* bootcode if bit 18 is set */
9867 if (cfg2 & (1 << 18))
9868 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9872 static int __devinit tg3_phy_probe(struct tg3 *tp)
9874 u32 hw_phy_id_1, hw_phy_id_2;
9875 u32 hw_phy_id, hw_phy_id_masked;
9878 /* Reading the PHY ID register can conflict with ASF
9879 * firwmare access to the PHY hardware.
9882 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9883 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9885 /* Now read the physical PHY_ID from the chip and verify
9886 * that it is sane. If it doesn't look good, we fall back
9887 * to either the hard-coded table based PHY_ID and failing
9888 * that the value found in the eeprom area.
9890 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9891 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9893 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9894 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9895 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9897 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9900 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9901 tp->phy_id = hw_phy_id;
9902 if (hw_phy_id_masked == PHY_ID_BCM8002)
9903 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9905 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9907 if (tp->phy_id != PHY_ID_INVALID) {
9908 /* Do nothing, phy ID already set up in
9909 * tg3_get_eeprom_hw_cfg().
9912 struct subsys_tbl_ent *p;
9914 /* No eeprom signature? Try the hardcoded
9915 * subsys device table.
9917 p = lookup_by_subsys(tp);
9921 tp->phy_id = p->phy_id;
9923 tp->phy_id == PHY_ID_BCM8002)
9924 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9928 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9929 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9930 u32 bmsr, adv_reg, tg3_ctrl;
9932 tg3_readphy(tp, MII_BMSR, &bmsr);
9933 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9934 (bmsr & BMSR_LSTATUS))
9935 goto skip_phy_reset;
9937 err = tg3_phy_reset(tp);
9941 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9942 ADVERTISE_100HALF | ADVERTISE_100FULL |
9943 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9945 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9946 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9947 MII_TG3_CTRL_ADV_1000_FULL);
9948 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9949 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9950 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9951 MII_TG3_CTRL_ENABLE_AS_MASTER);
9954 if (!tg3_copper_is_advertising_all(tp)) {
9955 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9957 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9958 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9960 tg3_writephy(tp, MII_BMCR,
9961 BMCR_ANENABLE | BMCR_ANRESTART);
9963 tg3_phy_set_wirespeed(tp);
9965 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9966 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9967 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9971 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9972 err = tg3_init_5401phy_dsp(tp);
9977 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9978 err = tg3_init_5401phy_dsp(tp);
9981 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9982 tp->link_config.advertising =
9983 (ADVERTISED_1000baseT_Half |
9984 ADVERTISED_1000baseT_Full |
9985 ADVERTISED_Autoneg |
9987 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9988 tp->link_config.advertising &=
9989 ~(ADVERTISED_1000baseT_Half |
9990 ADVERTISED_1000baseT_Full);
9995 static void __devinit tg3_read_partno(struct tg3 *tp)
9997 unsigned char vpd_data[256];
10001 if (tg3_nvram_read_swab(tp, 0x0, &magic))
10002 goto out_not_found;
10004 if (magic == TG3_EEPROM_MAGIC) {
10005 for (i = 0; i < 256; i += 4) {
10008 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10009 goto out_not_found;
10011 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
10012 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
10013 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10014 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10019 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10020 for (i = 0; i < 256; i += 4) {
10024 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10026 while (j++ < 100) {
10027 pci_read_config_word(tp->pdev, vpd_cap +
10028 PCI_VPD_ADDR, &tmp16);
10029 if (tmp16 & 0x8000)
10033 if (!(tmp16 & 0x8000))
10034 goto out_not_found;
10036 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10038 tmp = cpu_to_le32(tmp);
10039 memcpy(&vpd_data[i], &tmp, 4);
10043 /* Now parse and find the part number. */
10044 for (i = 0; i < 256; ) {
10045 unsigned char val = vpd_data[i];
10048 if (val == 0x82 || val == 0x91) {
10051 (vpd_data[i + 2] << 8)));
10056 goto out_not_found;
10058 block_end = (i + 3 +
10060 (vpd_data[i + 2] << 8)));
10062 while (i < block_end) {
10063 if (vpd_data[i + 0] == 'P' &&
10064 vpd_data[i + 1] == 'N') {
10065 int partno_len = vpd_data[i + 2];
10067 if (partno_len > 24)
10068 goto out_not_found;
10070 memcpy(tp->board_part_number,
10079 /* Part number not found. */
10080 goto out_not_found;
10084 strcpy(tp->board_part_number, "none");
10087 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10089 u32 val, offset, start;
10091 if (tg3_nvram_read_swab(tp, 0, &val))
10094 if (val != TG3_EEPROM_MAGIC)
10097 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10098 tg3_nvram_read_swab(tp, 0x4, &start))
10101 offset = tg3_nvram_logical_addr(tp, offset);
10102 if (tg3_nvram_read_swab(tp, offset, &val))
10105 if ((val & 0xfc000000) == 0x0c000000) {
10106 u32 ver_offset, addr;
10109 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10110 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10116 addr = offset + ver_offset - start;
10117 for (i = 0; i < 16; i += 4) {
10118 if (tg3_nvram_read(tp, addr + i, &val))
10121 val = cpu_to_le32(val);
10122 memcpy(tp->fw_ver + i, &val, 4);
10127 static int __devinit tg3_get_invariants(struct tg3 *tp)
10129 static struct pci_device_id write_reorder_chipsets[] = {
10130 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10131 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10132 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10133 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10134 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10135 PCI_DEVICE_ID_VIA_8385_0) },
10139 u32 cacheline_sz_reg;
10140 u32 pci_state_reg, grc_misc_cfg;
10145 /* Force memory write invalidate off. If we leave it on,
10146 * then on 5700_BX chips we have to enable a workaround.
10147 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10148 * to match the cacheline size. The Broadcom driver have this
10149 * workaround but turns MWI off all the times so never uses
10150 * it. This seems to suggest that the workaround is insufficient.
10152 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10153 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10154 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10156 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10157 * has the register indirect write enable bit set before
10158 * we try to access any of the MMIO registers. It is also
10159 * critical that the PCI-X hw workaround situation is decided
10160 * before that as well.
10162 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10165 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10166 MISC_HOST_CTRL_CHIPREV_SHIFT);
10168 /* Wrong chip ID in 5752 A0. This code can be removed later
10169 * as A0 is not in production.
10171 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10172 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10174 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10175 * we need to disable memory and use config. cycles
10176 * only to access all registers. The 5702/03 chips
10177 * can mistakenly decode the special cycles from the
10178 * ICH chipsets as memory write cycles, causing corruption
10179 * of register and memory space. Only certain ICH bridges
10180 * will drive special cycles with non-zero data during the
10181 * address phase which can fall within the 5703's address
10182 * range. This is not an ICH bug as the PCI spec allows
10183 * non-zero address during special cycles. However, only
10184 * these ICH bridges are known to drive non-zero addresses
10185 * during special cycles.
10187 * Since special cycles do not cross PCI bridges, we only
10188 * enable this workaround if the 5703 is on the secondary
10189 * bus of these ICH bridges.
10191 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10192 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10193 static struct tg3_dev_id {
10197 } ich_chipsets[] = {
10198 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10200 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10202 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10204 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10208 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10209 struct pci_dev *bridge = NULL;
10211 while (pci_id->vendor != 0) {
10212 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10218 if (pci_id->rev != PCI_ANY_ID) {
10221 pci_read_config_byte(bridge, PCI_REVISION_ID,
10223 if (rev > pci_id->rev)
10226 if (bridge->subordinate &&
10227 (bridge->subordinate->number ==
10228 tp->pdev->bus->number)) {
10230 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10231 pci_dev_put(bridge);
10237 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10238 * DMA addresses > 40-bit. This bridge may have other additional
10239 * 57xx devices behind it in some 4-port NIC designs for example.
10240 * Any tg3 device found behind the bridge will also need the 40-bit
10243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10244 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10245 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10246 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10247 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10250 struct pci_dev *bridge = NULL;
10253 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10254 PCI_DEVICE_ID_SERVERWORKS_EPB,
10256 if (bridge && bridge->subordinate &&
10257 (bridge->subordinate->number <=
10258 tp->pdev->bus->number) &&
10259 (bridge->subordinate->subordinate >=
10260 tp->pdev->bus->number)) {
10261 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10262 pci_dev_put(bridge);
10268 /* Initialize misc host control in PCI block. */
10269 tp->misc_host_ctrl |= (misc_ctrl_reg &
10270 MISC_HOST_CTRL_CHIPREV);
10271 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10272 tp->misc_host_ctrl);
10274 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10275 &cacheline_sz_reg);
10277 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10278 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10279 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10280 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10283 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10286 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10287 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10289 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10290 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10291 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10293 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10294 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10295 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10296 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10297 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10299 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10300 TG3_FLG2_HW_TSO_1_BUG;
10301 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10303 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10304 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10308 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10309 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10310 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10311 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10312 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10313 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10315 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10316 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10318 /* If we have an AMD 762 or VIA K8T800 chipset, write
10319 * reordering to the mailbox registers done by the host
10320 * controller can cause major troubles. We read back from
10321 * every mailbox register write to force the writes to be
10322 * posted to the chip in order.
10324 if (pci_dev_present(write_reorder_chipsets) &&
10325 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10326 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10329 tp->pci_lat_timer < 64) {
10330 tp->pci_lat_timer = 64;
10332 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10333 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10334 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10335 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10337 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10341 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10344 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10345 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10347 /* If this is a 5700 BX chipset, and we are in PCI-X
10348 * mode, enable register write workaround.
10350 * The workaround is to use indirect register accesses
10351 * for all chip writes not to mailbox registers.
10353 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10357 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10359 /* The chip can have it's power management PCI config
10360 * space registers clobbered due to this bug.
10361 * So explicitly force the chip into D0 here.
10363 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10365 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10366 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10367 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10370 /* Also, force SERR#/PERR# in PCI command. */
10371 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10372 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10373 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10377 /* 5700 BX chips need to have their TX producer index mailboxes
10378 * written twice to workaround a bug.
10380 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10381 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10383 /* Back to back register writes can cause problems on this chip,
10384 * the workaround is to read back all reg writes except those to
10385 * mailbox regs. See tg3_write_indirect_reg32().
10387 * PCI Express 5750_A0 rev chips need this workaround too.
10389 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10390 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10391 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10392 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10394 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10395 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10396 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10397 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10399 /* Chip-specific fixup from Broadcom driver */
10400 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10401 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10402 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10403 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10406 /* Default fast path register access methods */
10407 tp->read32 = tg3_read32;
10408 tp->write32 = tg3_write32;
10409 tp->read32_mbox = tg3_read32;
10410 tp->write32_mbox = tg3_write32;
10411 tp->write32_tx_mbox = tg3_write32;
10412 tp->write32_rx_mbox = tg3_write32;
10414 /* Various workaround register access methods */
10415 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10416 tp->write32 = tg3_write_indirect_reg32;
10417 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10418 tp->write32 = tg3_write_flush_reg32;
10420 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10421 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10422 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10423 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10424 tp->write32_rx_mbox = tg3_write_flush_reg32;
10427 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10428 tp->read32 = tg3_read_indirect_reg32;
10429 tp->write32 = tg3_write_indirect_reg32;
10430 tp->read32_mbox = tg3_read_indirect_mbox;
10431 tp->write32_mbox = tg3_write_indirect_mbox;
10432 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10433 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10438 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10439 pci_cmd &= ~PCI_COMMAND_MEMORY;
10440 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10443 if (tp->write32 == tg3_write_indirect_reg32 ||
10444 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10445 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10447 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10449 /* Get eeprom hw config before calling tg3_set_power_state().
10450 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10451 * determined before calling tg3_set_power_state() so that
10452 * we know whether or not to switch out of Vaux power.
10453 * When the flag is set, it means that GPIO1 is used for eeprom
10454 * write protect and also implies that it is a LOM where GPIOs
10455 * are not used to switch power.
10457 tg3_get_eeprom_hw_cfg(tp);
10459 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10460 * GPIO1 driven high will bring 5700's external PHY out of reset.
10461 * It is also used as eeprom write protect on LOMs.
10463 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10464 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10465 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10466 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10467 GRC_LCLCTRL_GPIO_OUTPUT1);
10468 /* Unused GPIO3 must be driven as output on 5752 because there
10469 * are no pull-up resistors on unused GPIO pins.
10471 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10472 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10474 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10475 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10477 /* Force the chip into D0. */
10478 err = tg3_set_power_state(tp, PCI_D0);
10480 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10481 pci_name(tp->pdev));
10485 /* 5700 B0 chips do not support checksumming correctly due
10486 * to hardware bugs.
10488 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10489 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10491 /* Derive initial jumbo mode from MTU assigned in
10492 * ether_setup() via the alloc_etherdev() call
10494 if (tp->dev->mtu > ETH_DATA_LEN &&
10495 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10496 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10498 /* Determine WakeOnLan speed to use. */
10499 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10500 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10501 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10502 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10503 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10505 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10508 /* A few boards don't want Ethernet@WireSpeed phy feature */
10509 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10510 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10511 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10512 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10513 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10514 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10516 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10517 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10518 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10519 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10520 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10522 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10525 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10527 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10530 tp->coalesce_mode = 0;
10531 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10532 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10533 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10535 /* Initialize MAC MI mode, polling disabled. */
10536 tw32_f(MAC_MI_MODE, tp->mi_mode);
10539 /* Initialize data/descriptor byte/word swapping. */
10540 val = tr32(GRC_MODE);
10541 val &= GRC_MODE_HOST_STACKUP;
10542 tw32(GRC_MODE, val | tp->grc_mode);
10544 tg3_switch_clocks(tp);
10546 /* Clear this out for sanity. */
10547 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10549 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10551 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10552 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10553 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10555 if (chiprevid == CHIPREV_ID_5701_A0 ||
10556 chiprevid == CHIPREV_ID_5701_B0 ||
10557 chiprevid == CHIPREV_ID_5701_B2 ||
10558 chiprevid == CHIPREV_ID_5701_B5) {
10559 void __iomem *sram_base;
10561 /* Write some dummy words into the SRAM status block
10562 * area, see if it reads back correctly. If the return
10563 * value is bad, force enable the PCIX workaround.
10565 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10567 writel(0x00000000, sram_base);
10568 writel(0x00000000, sram_base + 4);
10569 writel(0xffffffff, sram_base + 4);
10570 if (readl(sram_base) != 0x00000000)
10571 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10576 tg3_nvram_init(tp);
10578 grc_misc_cfg = tr32(GRC_MISC_CFG);
10579 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10581 /* Broadcom's driver says that CIOBE multisplit has a bug */
10583 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10584 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10585 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10586 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10590 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10591 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10592 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10594 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10595 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10596 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10597 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10598 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10599 HOSTCC_MODE_CLRTICK_TXBD);
10601 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10602 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10603 tp->misc_host_ctrl);
10606 /* these are limited to 10/100 only */
10607 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10608 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10609 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10610 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10611 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10612 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10613 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10614 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10615 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10616 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10617 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10619 err = tg3_phy_probe(tp);
10621 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10622 pci_name(tp->pdev), err);
10623 /* ... but do not return immediately ... */
10626 tg3_read_partno(tp);
10627 tg3_read_fw_ver(tp);
10629 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10630 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10633 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10635 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10638 /* 5700 {AX,BX} chips have a broken status block link
10639 * change bit implementation, so we must use the
10640 * status register in those cases.
10642 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10643 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10645 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10647 /* The led_ctrl is set during tg3_phy_probe, here we might
10648 * have to force the link status polling mechanism based
10649 * upon subsystem IDs.
10651 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10652 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10653 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10654 TG3_FLAG_USE_LINKCHG_REG);
10657 /* For all SERDES we poll the MAC status register. */
10658 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10659 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10661 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10663 /* All chips before 5787 can get confused if TX buffers
10664 * straddle the 4GB address boundary in some cases.
10666 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10667 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10668 tp->dev->hard_start_xmit = tg3_start_xmit;
10670 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10673 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10674 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10677 tp->rx_std_max_post = TG3_RX_RING_SIZE;
10679 /* Increment the rx prod index on the rx std ring by at most
10680 * 8 for these chips to workaround hw errata.
10682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10683 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10684 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10685 tp->rx_std_max_post = 8;
10687 /* By default, disable wake-on-lan. User can change this
10688 * using ETHTOOL_SWOL.
10690 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10695 #ifdef CONFIG_SPARC64
/* Fetch the MAC address from the SPARC OpenFirmware "local-mac-address"
 * property attached to this PCI device's PROM node, copying it into both
 * the active (dev_addr) and permanent (perm_addr) address fields.
 * NOTE(review): the declaration of "len", the return statements and the
 * closing brace are not present in this listing -- presumably returns 0
 * on success and non-zero when the property is absent; confirm against
 * the full source.
 */
10696 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10698 struct net_device *dev = tp->dev;
10699 struct pci_dev *pdev = tp->pdev;
/* pdev->sysdata on sparc64 carries the firmware cookie for this device. */
10700 struct pcidev_cookie *pcp = pdev->sysdata;
10703 unsigned char *addr;
/* Ask the firmware device tree for the board-programmed MAC address. */
10706 addr = of_get_property(pcp->prom_node, "local-mac-address",
/* Only accept a property that is exactly 6 (ETH_ALEN) bytes long. */
10708 if (addr && len == 6) {
10709 memcpy(dev->dev_addr, addr, 6);
10710 memcpy(dev->perm_addr, dev->dev_addr, 6);
/* Fallback for SPARC systems: use the machine-wide Ethernet address from
 * the IDPROM when no per-device address could be found.  Fills both the
 * active and permanent address fields.
 * NOTE(review): the return statement and closing brace are not visible in
 * this listing -- presumably returns 0 unconditionally; confirm.
 */
10717 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10719 struct net_device *dev = tp->dev;
10721 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10722 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/* Determine the device's MAC address, trying sources in decreasing order
 * of preference:
 *   1. (sparc64 only) the OpenFirmware property via tg3_get_macaddr_sparc()
 *   2. the bootcode's MAC address mailbox in NIC SRAM
 *   3. NVRAM at mac_offset
 *   4. the MAC_ADDR_0_{HIGH,LOW} hardware registers
 *   5. (sparc64 only) the IDPROM system default address
 * The validated result is mirrored into dev->perm_addr.
 * NOTE(review): several lines (mac_offset setup, addr_ok declaration,
 * else arms, returns) are missing from this listing; the control-flow
 * comments below are inferred from the visible fragments -- confirm
 * against the full source.
 */
10727 static int __devinit tg3_get_device_address(struct tg3 *tp)
10729 struct net_device *dev = tp->dev;
10730 u32 hi, lo, mac_offset;
10733 #ifdef CONFIG_SPARC64
/* Firmware-provided address wins outright when available. */
10734 if (!tg3_get_macaddr_sparc(tp))
/* Dual-MAC parts (5704 / 5780-class): the second function reads its
 * address from a different SRAM/NVRAM offset, selected via
 * TG3PCI_DUAL_MAC_CTRL.  A NVRAM reset is issued under the nvram lock.
 */
10739 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10740 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10741 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10743 if (tg3_nvram_lock(tp))
10744 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10746 tg3_nvram_unlock(tp);
10749 /* First try to get it from MAC address mailbox. */
10750 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b == "HK", the bootcode signature marking a valid mailbox entry. */
10751 if ((hi >> 16) == 0x484b) {
10752 dev->dev_addr[0] = (hi >> 8) & 0xff;
10753 dev->dev_addr[1] = (hi >> 0) & 0xff;
10755 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10756 dev->dev_addr[2] = (lo >> 24) & 0xff;
10757 dev->dev_addr[3] = (lo >> 16) & 0xff;
10758 dev->dev_addr[4] = (lo >> 8) & 0xff;
10759 dev->dev_addr[5] = (lo >> 0) & 0xff;
10761 /* Some old bootcode may report a 0 MAC address in SRAM */
10762 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10765 /* Next, try NVRAM. */
/* Note the byte order differs from the mailbox layout above. */
10766 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10767 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10768 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10769 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10770 dev->dev_addr[2] = ((lo >> 0) & 0xff);
10771 dev->dev_addr[3] = ((lo >> 8) & 0xff);
10772 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10773 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10775 /* Finally just fetch it out of the MAC control regs. */
10777 hi = tr32(MAC_ADDR_0_HIGH);
10778 lo = tr32(MAC_ADDR_0_LOW);
10780 dev->dev_addr[5] = lo & 0xff;
10781 dev->dev_addr[4] = (lo >> 8) & 0xff;
10782 dev->dev_addr[3] = (lo >> 16) & 0xff;
10783 dev->dev_addr[2] = (lo >> 24) & 0xff;
10784 dev->dev_addr[1] = hi & 0xff;
10785 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* Nothing usable found: on sparc64 fall back to the IDPROM default. */
10789 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10790 #ifdef CONFIG_SPARC64
10791 if (!tg3_get_default_macaddr_sparc(tp))
10796 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10800 #define BOUNDARY_SINGLE_CACHELINE 1
10801 #define BOUNDARY_MULTI_CACHELINE 2
/* Compute the DMA read/write boundary bits to OR into the DMA_RW_CTRL
 * value @val, based on the PCI cache line size and a per-architecture
 * "goal" (single- vs multi-cacheline bursts).  Three register layouts are
 * handled: PCI-X, PCI Express, and conventional PCI.
 * @val: base DMA_RW_CTRL value to augment.
 * Returns the augmented value.
 * NOTE(review): the declarations of "byte" and "goal", several break/
 * case labels, the default goal assignment, and the final return are
 * missing from this listing -- the inferred structure below should be
 * confirmed against the full source.
 */
10803 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10805 int cacheline_size;
10809 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* A zero config value presumably means "unknown"; assume 1024 bytes.
 * Otherwise the register unit is 4-byte words. */
10811 cacheline_size = 1024;
10813 cacheline_size = (int) byte * 4;
10815 /* On 5703 and later chips, the boundary bits have no
10818 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10819 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10820 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
/* Architecture-specific burst policy for the host PCI controllers. */
10823 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10824 goal = BOUNDARY_MULTI_CACHELINE;
10826 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10827 goal = BOUNDARY_SINGLE_CACHELINE;
10836 /* PCI controllers on most RISC systems tend to disconnect
10837 * when a device tries to burst across a cache-line boundary.
10838 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10840 * Unfortunately, for PCI-E there are only limited
10841 * write-side controls for this, and thus for reads
10842 * we will still get the disconnects. We'll also waste
10843 * these PCI cycles for both read and write for chips
10844 * other than 5700 and 5701 which do not implement the
/* PCI-X layout: both read and write boundary fields are encoded. */
10847 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10848 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10849 switch (cacheline_size) {
10854 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10855 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10856 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10858 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10859 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10864 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10865 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10869 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10870 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express layout: only the write boundary is controllable. */
10873 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10874 switch (cacheline_size) {
10878 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10879 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10880 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10886 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10887 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI layout: boundary chosen to match cache line size. */
10891 switch (cacheline_size) {
10893 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10894 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10895 DMA_RWCTRL_WRITE_BNDRY_16);
10900 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10901 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10902 DMA_RWCTRL_WRITE_BNDRY_32);
10907 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10908 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10909 DMA_RWCTRL_WRITE_BNDRY_64);
10914 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10915 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10916 DMA_RWCTRL_WRITE_BNDRY_128);
10921 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10922 DMA_RWCTRL_WRITE_BNDRY_256);
10925 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10926 DMA_RWCTRL_WRITE_BNDRY_512);
10930 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10931 DMA_RWCTRL_WRITE_BNDRY_1024);
/* Run one DMA transaction through the chip's internal DMA engine as a
 * self-test.  Builds a single internal buffer descriptor for the host
 * buffer at @buf_dma/@size, pushes it into the chip's SRAM descriptor
 * pool via PCI config-space window writes, kicks the read- or write-DMA
 * FTQ depending on @to_device, and polls the completion FIFO.
 * @to_device: non-zero = host-to-card (read DMA), zero = card-to-host.
 * NOTE(review): declarations (i, ret), some else arms, the timeout
 * handling after the poll loop, and the return are missing from this
 * listing -- presumably returns 0 on completion and an error on timeout;
 * confirm against the full source.
 */
10940 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10942 struct tg3_internal_buffer_desc test_desc;
10943 u32 sram_dma_descs;
10946 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce: clear completion FIFOs, DMA status, and reset buffer
 * manager / FTQ state before the test. */
10948 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10949 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10950 tw32(RDMAC_STATUS, 0);
10951 tw32(WDMAC_STATUS, 0);
10953 tw32(BUFMGR_MODE, 0);
10954 tw32(FTQ_RESET, 0);
/* Describe the host-side test buffer. */
10956 test_desc.addr_hi = ((u64) buf_dma) >> 32;
10957 test_desc.addr_lo = buf_dma & 0xffffffff;
/* 0x2100 is the NIC SRAM offset used as the on-chip side of the copy. */
10958 test_desc.nic_mbuf = 0x00002100;
10959 test_desc.len = size;
10962 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
10963 * the *second* time the tg3 driver was getting loaded after an
10966 * Broadcom tells me:
10967 * ...the DMA engine is connected to the GRC block and a DMA
10968 * reset may affect the GRC block in some unpredictable way...
10969 * The behavior of resets to individual blocks has not been tested.
10971 * Broadcom noted the GRC reset will also reset all sub-components.
/* cqid/sqid pairs select the completion and submission queues for the
 * read-DMA (host->card) vs write-DMA (card->host) direction. */
10974 test_desc.cqid_sqid = (13 << 8) | 2;
10976 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10979 test_desc.cqid_sqid = (16 << 8) | 7;
10981 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10984 test_desc.flags = 0x00000005;
/* Copy the descriptor into NIC SRAM one u32 at a time through the PCI
 * config-space memory window. */
10986 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10989 val = *(((u32 *)&test_desc) + i);
10990 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10991 sram_dma_descs + (i * sizeof(u32)));
10992 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10994 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Enqueue the descriptor on the appropriate FTQ to start the DMA. */
10997 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10999 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (bounded to 40 iterations) for our descriptor to appear on the
 * completion FIFO. */
11003 for (i = 0; i < 40; i++) {
11007 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11009 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11010 if ((val & 0xffff) == sram_dma_descs) {
11021 #define TEST_BUFFER_SIZE 0x2000
/* Probe-time DMA self-test and DMA_RW_CTRL tuning.
 * Allocates a coherent TEST_BUFFER_SIZE buffer, derives a chip- and
 * bus-specific dma_rwctrl value (watermarks, boundary bits, errata
 * workarounds), then on 5700/5701 performs a write+readback DMA loop to
 * detect the write-DMA bug, tightening the write boundary to 16 bytes if
 * corruption is observed.  Known-bad host bridges (Apple UniNorth) get
 * the 16-byte boundary even when the test passes.
 * Returns 0 on success or a negative error.
 * NOTE(review): allocation-failure handling, the retry loop structure,
 * some else arms and the final return are missing from this listing;
 * confirm inferred flow against the full source.
 */
11023 static int __devinit tg3_test_dma(struct tg3 *tp)
11025 dma_addr_t buf_dma;
11026 u32 *buf, saved_dma_rwctrl;
11029 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
/* Baseline PCI read/write command codes for DMA_RW_CTRL. */
11035 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11036 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11038 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
/* Bus-type specific watermark / workaround bits (magic values from
 * vendor tuning). */
11040 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11041 /* DMA read watermark not used on PCIE */
11042 tp->dma_rwctrl |= 0x00180000;
11043 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11046 tp->dma_rwctrl |= 0x003f0000;
11048 tp->dma_rwctrl |= 0x003f000f;
11050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11052 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11054 /* If the 5704 is behind the EPB bridge, we can
11055 * do the less restrictive ONE_DMA workaround for
11056 * better performance.
11058 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11060 tp->dma_rwctrl |= 0x8000;
11061 else if (ccval == 0x6 || ccval == 0x7)
11062 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11064 /* Set bit 23 to enable PCIX hw bug fix */
11065 tp->dma_rwctrl |= 0x009f0000;
11066 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11067 /* 5780 always in PCIX mode */
11068 tp->dma_rwctrl |= 0x00144000;
11069 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11070 /* 5714 always in PCIX mode */
11071 tp->dma_rwctrl |= 0x00148000;
11073 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: clear the low boundary nibble. */
11077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11079 tp->dma_rwctrl &= 0xfffffff0;
11081 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11083 /* Remove this if it causes problems for some boards. */
11084 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11086 /* On 5700/5701 chips, we need to set this bit.
11087 * Otherwise the chip will issue cacheline transactions
11088 * to streamable DMA memory with not all the byte
11089 * enables turned on. This is an error on several
11090 * RISC PCI controllers, in particular sparc64.
11092 * On 5703/5704 chips, this bit has been reassigned
11093 * a different meaning. In particular, it is used
11094 * on those chips to enable a PCI-X workaround.
11096 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11099 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11102 /* Unneeded, already done by tg3_get_invariants. */
11103 tg3_switch_clocks(tp);
/* Only 5700/5701 need the actual DMA exercise below. */
11107 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11108 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11111 /* It is best to perform DMA test with maximum write burst size
11112 * to expose the 5700/5701 write DMA bug.
11114 saved_dma_rwctrl = tp->dma_rwctrl;
11115 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11116 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the test buffer with a known pattern. */
11121 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11124 /* Send the buffer to the chip. */
11125 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11127 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11132 /* validate data reached card RAM correctly. */
11133 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11135 tg3_read_mem(tp, 0x2100 + (i*4), &val);
11136 if (le32_to_cpu(val) != p[i]) {
11137 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
11138 /* ret = -ENODEV here? */
11143 /* Now read it back. */
11144 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11146 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
/* Verify the round trip; on mismatch, fall back to a 16-byte write
 * boundary (unless already set) and presumably retry. */
11152 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11156 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11157 DMA_RWCTRL_WRITE_BNDRY_16) {
11158 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11159 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11160 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11163 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
/* Full buffer verified clean. */
11169 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11175 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11176 DMA_RWCTRL_WRITE_BNDRY_16) {
11177 static struct pci_device_id dma_wait_state_chipsets[] = {
11178 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11179 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11183 /* DMA test passed without adjusting DMA boundary,
11184 * now look for chipsets that are known to expose the
11185 * DMA bug without failing the test.
11187 if (pci_dev_present(dma_wait_state_chipsets)) {
11188 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11189 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11192 /* Safe to use the calculated DMA boundary. */
11193 tp->dma_rwctrl = saved_dma_rwctrl;
11195 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11199 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Initialize tp->link_config to its probe-time defaults: advertise all
 * 10/100/1000 half/full modes with autonegotiation, and mark every
 * speed/duplex field (current, active, and pre-suspend "orig" copies)
 * as INVALID until a link is negotiated.
 */
11204 static void __devinit tg3_init_link_config(struct tg3 *tp)
11206 tp->link_config.advertising =
11207 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11208 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11209 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11210 ADVERTISED_Autoneg | ADVERTISED_MII);
11211 tp->link_config.speed = SPEED_INVALID;
11212 tp->link_config.duplex = DUPLEX_INVALID;
11213 tp->link_config.autoneg = AUTONEG_ENABLE;
11214 tp->link_config.active_speed = SPEED_INVALID;
11215 tp->link_config.active_duplex = DUPLEX_INVALID;
11216 tp->link_config.phy_is_low_power = 0;
/* "orig_*" fields hold the user-requested settings across power-state
 * transitions. */
11217 tp->link_config.orig_speed = SPEED_INVALID;
11218 tp->link_config.orig_duplex = DUPLEX_INVALID;
11219 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Set the buffer-manager watermark defaults.  5705-and-newer chips use
 * the smaller 5705-family mbuf watermarks (with 5780 values for jumbo
 * frames); older chips use the original defaults.  DMA low/high
 * watermarks are common to both.
 * NOTE(review): the "} else {" between the two branches is not visible
 * in this listing.
 */
11222 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11224 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11225 tp->bufmgr_config.mbuf_read_dma_low_water =
11226 DEFAULT_MB_RDMA_LOW_WATER_5705;
11227 tp->bufmgr_config.mbuf_mac_rx_low_water =
11228 DEFAULT_MB_MACRX_LOW_WATER_5705;
11229 tp->bufmgr_config.mbuf_high_water =
11230 DEFAULT_MB_HIGH_WATER_5705;
11232 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11233 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11234 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11235 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11236 tp->bufmgr_config.mbuf_high_water_jumbo =
11237 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11239 tp->bufmgr_config.mbuf_read_dma_low_water =
11240 DEFAULT_MB_RDMA_LOW_WATER;
11241 tp->bufmgr_config.mbuf_mac_rx_low_water =
11242 DEFAULT_MB_MACRX_LOW_WATER;
11243 tp->bufmgr_config.mbuf_high_water =
11244 DEFAULT_MB_HIGH_WATER;
11246 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11247 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11248 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11249 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11250 tp->bufmgr_config.mbuf_high_water_jumbo =
11251 DEFAULT_MB_HIGH_WATER_JUMBO;
11254 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11255 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* Map the masked PHY id (tp->phy_id & PHY_ID_MASK) to a short
 * human-readable model string for the probe banner.  A zero id means a
 * serdes interface with no recognizable PHY.
 */
11258 static char * __devinit tg3_phy_string(struct tg3 *tp)
11260 switch (tp->phy_id & PHY_ID_MASK) {
11261 case PHY_ID_BCM5400: return "5400";
11262 case PHY_ID_BCM5401: return "5401";
11263 case PHY_ID_BCM5411: return "5411";
11264 case PHY_ID_BCM5701: return "5701";
11265 case PHY_ID_BCM5703: return "5703";
11266 case PHY_ID_BCM5704: return "5704";
11267 case PHY_ID_BCM5705: return "5705";
11268 case PHY_ID_BCM5750: return "5750";
11269 case PHY_ID_BCM5752: return "5752";
11270 case PHY_ID_BCM5714: return "5714";
11271 case PHY_ID_BCM5780: return "5780";
11272 case PHY_ID_BCM5755: return "5755";
11273 case PHY_ID_BCM5787: return "5787";
11274 case PHY_ID_BCM8002: return "8002/serdes";
11275 case 0: return "serdes";
11276 default: return "unknown";
/* Format a description of the bus the NIC sits on ("PCI Express",
 * "PCIX:<freq>", or "PCI:<freq>:<width>") into the caller-supplied
 * buffer @str and presumably return it (the return statements are not
 * visible in this listing).  For PCI-X the frequency is decoded from
 * TG3PCI_CLOCK_CTRL; 5704 CIOBE boards always report 133MHz.
 */
11280 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11282 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11283 strcpy(str, "PCI Express");
11285 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
/* Low 5 bits of CLOCK_CTRL encode the PCI-X clock selection. */
11286 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11288 strcpy(str, "PCIX:");
11290 if ((clock_ctrl == 7) ||
11291 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11292 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11293 strcat(str, "133MHz");
11294 else if (clock_ctrl == 0)
11295 strcat(str, "33MHz");
11296 else if (clock_ctrl == 2)
11297 strcat(str, "50MHz");
11298 else if (clock_ctrl == 4)
11299 strcat(str, "66MHz");
11300 else if (clock_ctrl == 6)
11301 strcat(str, "100MHz");
/* Conventional PCI: speed from the HIGH_SPEED flag, then bus width. */
11303 strcpy(str, "PCI:");
11304 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11305 strcat(str, "66MHz");
11307 strcat(str, "33MHz");
11309 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11310 strcat(str, ":32-bit");
11312 strcat(str, ":64-bit");
/* Locate the "peer" PCI function of a dual-port device (e.g. 5704):
 * scan all eight functions of our slot (devfn with the function bits
 * masked off) for a pci_dev other than our own.
 * NOTE(review): the loop's break, the single-port fallback assignment,
 * the refcount drop mentioned in the trailing comment, and the return
 * are not visible in this listing -- presumably returns the peer, or
 * tp->pdev when configured single-port; confirm against full source.
 */
11316 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11318 struct pci_dev *peer;
11319 unsigned int func, devnr = tp->pdev->devfn & ~7;
11321 for (func = 0; func < 8; func++) {
11322 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11323 if (peer && peer != tp->pdev)
11327 /* 5704 can be configured in single-port mode, set peer to
11328 * tp->pdev in that case.
11336 * We don't need to keep the refcount elevated; there's no way
11337 * to remove one half of this device without removing the other
/* Initialize tp->coal, the ethtool interrupt-coalescing defaults.
 * Starts from the LOW_* tick/frame defaults, switches to the CLRTCKS
 * variants when the host coalescing engine clears tick counters on BD
 * events, and zeroes the per-IRQ and stats fields on 5705+ chips (which
 * do not support them).
 */
11344 static void __devinit tg3_init_coal(struct tg3 *tp)
11346 struct ethtool_coalesce *ec = &tp->coal;
11348 memset(ec, 0, sizeof(*ec));
11349 ec->cmd = ETHTOOL_GCOALESCE;
11350 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11351 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11352 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11353 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11354 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11355 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11356 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11357 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11358 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* CLRTICK modes need the alternate tick values set up by
 * tg3_get_invariants via tp->coalesce_mode. */
11360 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11361 HOSTCC_MODE_CLRTICK_TXBD)) {
11362 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11363 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11364 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11365 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705+ silicon lacks the per-IRQ and stats-block coalescing knobs. */
11368 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11369 ec->rx_coalesce_usecs_irq = 0;
11370 ec->tx_coalesce_usecs_irq = 0;
11371 ec->stats_block_coalesce_usecs = 0;
/* tg3_init_one() - PCI probe entry point for a Tigon3 device.
 *
 * Enables the PCI device, claims its BAR0 register window, allocates the
 * net_device, detects chip capabilities, configures DMA masks and offload
 * features, resets the chip if a previous driver left it active, and
 * registers the interface with the network stack.
 *
 * Returns 0 on success, a negative errno on failure (error paths unwind
 * via the goto labels at the bottom).
 *
 * NOTE(review): this extract is missing interior lines — several `if (err)`
 * guards, closing braces, the err_out_free_dev/err_out_iounmap labels and
 * the final `return` are not visible. Comments below describe only what
 * the visible code demonstrates.
 */
11375 static int __devinit tg3_init_one(struct pci_dev *pdev,
11376 const struct pci_device_id *ent)
/* Print the driver banner only once, no matter how many NICs probe. */
11378 static int tg3_version_printed = 0;
11379 unsigned long tg3reg_base, tg3reg_len;
11380 struct net_device *dev;
11382 int i, err, pm_cap;
11384 u64 dma_mask, persist_dma_mask;
11386 if (tg3_version_printed++ == 0)
11387 printk(KERN_INFO "%s", version);
/* Wake the device and enable PCI config/IO access. */
11389 err = pci_enable_device(pdev);
11391 printk(KERN_ERR PFX "Cannot enable PCI device, "
/* BAR0 must be a memory-mapped resource; the register window lives there. */
11396 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11397 printk(KERN_ERR PFX "Cannot find proper PCI device "
11398 "base address, aborting.\n");
11400 goto err_out_disable_pdev;
/* Reserve all PCI regions of this device under the driver's name. */
11403 err = pci_request_regions(pdev, DRV_MODULE_NAME);
11405 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11407 goto err_out_disable_pdev;
/* Enable bus-mastering so the chip can DMA. */
11410 pci_set_master(pdev);
11412 /* Find power-management capability. */
11413 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11415 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11418 goto err_out_free_res;
/* Physical address/length of the BAR0 register window for ioremap below. */
11421 tg3reg_base = pci_resource_start(pdev, 0);
11422 tg3reg_len = pci_resource_len(pdev, 0);
/* Allocate the net_device with struct tg3 as its private area. */
11424 dev = alloc_etherdev(sizeof(*tp));
11426 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11428 goto err_out_free_res;
11431 SET_MODULE_OWNER(dev);
11432 SET_NETDEV_DEV(dev, &pdev->dev);
/* Hardware VLAN tag insertion/stripping, compiled in only when the
 * kernel has 802.1Q support (see TG3_VLAN_TAG_USED in the header).
 */
11434 #if TG3_VLAN_TAG_USED
11435 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11436 dev->vlan_rx_register = tg3_vlan_rx_register;
11437 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
/* Initialize the driver-private state with the module defaults. */
11440 tp = netdev_priv(dev);
11443 tp->pm_cap = pm_cap;
11444 tp->mac_mode = TG3_DEF_MAC_MODE;
11445 tp->rx_mode = TG3_DEF_RX_MODE;
11446 tp->tx_mode = TG3_DEF_TX_MODE;
11447 tp->mi_mode = MAC_MI_MODE_BASE;
/* msg_enable: module parameter override, else the compiled-in default. */
11449 tp->msg_enable = tg3_debug;
11451 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11453 /* The word/byte swap controls here control register access byte
11454 * swapping. DMA data byte swapping is controlled in the GRC_MODE
11457 tp->misc_host_ctrl =
11458 MISC_HOST_CTRL_MASK_PCI_INT |
11459 MISC_HOST_CTRL_WORD_SWAP |
11460 MISC_HOST_CTRL_INDIR_ACCESS |
11461 MISC_HOST_CTRL_PCISTATE_RW;
11463 /* The NONFRM (non-frame) byte/word swap controls take effect
11464 * on descriptor entries, anything which isn't packet data.
11466 * The StrongARM chips on the board (one for tx, one for rx)
11467 * are running in big-endian mode.
11469 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11470 GRC_MODE_WSWAP_NONFRM_DATA);
11471 #ifdef __BIG_ENDIAN
11472 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
/* Locks protecting register access and the tx path; reset work item. */
11474 spin_lock_init(&tp->lock);
11475 spin_lock_init(&tp->tx_lock);
11476 spin_lock_init(&tp->indirect_lock);
11477 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
/* Map the chip's register window (uncached MMIO). */
11479 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11480 if (tp->regs == 0UL) {
11481 printk(KERN_ERR PFX "Cannot map device registers, "
11484 goto err_out_free_dev;
11487 tg3_init_link_config(tp);
/* Default ring sizes; rx_pending may be shrunk below for 5705 A1. */
11489 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11490 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11491 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
/* Wire up the pre-ndo_ops net_device method pointers. */
11493 dev->open = tg3_open;
11494 dev->stop = tg3_close;
11495 dev->get_stats = tg3_get_stats;
11496 dev->set_multicast_list = tg3_set_rx_mode;
11497 dev->set_mac_address = tg3_set_mac_addr;
11498 dev->do_ioctl = tg3_ioctl;
11499 dev->tx_timeout = tg3_tx_timeout;
11500 dev->poll = tg3_poll;
11501 dev->ethtool_ops = &tg3_ethtool_ops;
11503 dev->watchdog_timeo = TG3_TX_TIMEOUT;
11504 dev->change_mtu = tg3_change_mtu;
11505 dev->irq = pdev->irq;
11506 #ifdef CONFIG_NET_POLL_CONTROLLER
11507 dev->poll_controller = tg3_poll_controller;
/* Read chip revision, flags and quirks out of config space/NVRAM. */
11510 err = tg3_get_invariants(tp);
11512 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11514 goto err_out_iounmap;
11517 /* The EPB bridge inside 5714, 5715, and 5780 and any
11518 * device behind the EPB cannot support DMA addresses > 40-bit.
11519 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11520 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11521 * do DMA address check in tg3_start_xmit().
11523 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11524 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11525 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11526 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11527 #ifdef CONFIG_HIGHMEM
11528 dma_mask = DMA_64BIT_MASK;
11531 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11533 /* Configure DMA attributes. */
11534 if (dma_mask > DMA_32BIT_MASK) {
11535 err = pci_set_dma_mask(pdev, dma_mask);
11537 dev->features |= NETIF_F_HIGHDMA;
/* Coherent (descriptor) allocations use the persistent mask, which may
 * be narrower than the streaming mask chosen above.
 */
11538 err = pci_set_consistent_dma_mask(pdev,
11541 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11542 "DMA for consistent allocations\n");
11543 goto err_out_iounmap;
/* Fall back to 32-bit DMA if the wide mask was rejected. */
11547 if (err || dma_mask == DMA_32BIT_MASK) {
11548 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11550 printk(KERN_ERR PFX "No usable DMA configuration, "
11552 goto err_out_iounmap;
11556 tg3_init_bufmgr_config(tp);
/* Decide TSO capability: hardware TSO chips always qualify; certain
 * old revisions (5700/5701/5705 A0) and ASF-enabled boards do not.
 */
11558 #if TG3_TSO_SUPPORT != 0
11559 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11560 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11562 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11563 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11564 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11565 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11566 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11568 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11571 /* TSO is on by default on chips that support hardware TSO.
11572 * Firmware TSO on older chips gives lower performance, so it
11573 * is off by default, but can be enabled using ethtool.
11575 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11576 dev->features |= NETIF_F_TSO;
11577 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
11578 dev->features |= NETIF_F_TSO6;
/* 5705 A1 without TSO on a slow bus limits the rx ring to 64 entries. */
11583 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11584 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11585 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11586 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11587 tp->rx_pending = 63;
/* Dual-port chips (5704/5714): locate the sibling function's pci_dev. */
11590 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11591 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11592 tp->pdev_peer = tg3_find_peer(tp);
11594 err = tg3_get_device_address(tp);
11596 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11598 goto err_out_iounmap;
11602 * Reset chip in case UNDI or EFI driver did not shutdown
11603 * DMA self test will enable WDMAC and we'll see (spurious)
11604 * pending DMA on the PCI bus at that point.
11606 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11607 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
/* Save config space first: tg3_halt resets the core and we must be
 * able to restore the PCI state afterwards.
 */
11608 pci_save_state(tp->pdev);
11609 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11610 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11613 err = tg3_test_dma(tp);
11615 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11616 goto err_out_iounmap;
11619 /* Tigon3 can do ipv4 only... and some chips have buggy
/* Checksum offload: 5755/5787 can do full HW csum, other good chips
 * get IP csum + scatter/gather; broken-csum chips get rx csums off.
 */
11622 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11623 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11624 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11625 dev->features |= NETIF_F_HW_CSUM;
11627 dev->features |= NETIF_F_IP_CSUM;
11628 dev->features |= NETIF_F_SG;
11629 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11631 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11633 /* flow control autonegotiation is default behavior */
11634 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11638 /* Now that we have fully setup the chip, save away a snapshot
11639 * of the PCI config space. We need to restore this after
11640 * GRC_MISC_CFG core clock resets and some resume events.
11642 pci_save_state(tp->pdev);
11644 err = register_netdev(dev);
11646 printk(KERN_ERR PFX "Cannot register net device, "
11648 goto err_out_iounmap;
11651 pci_set_drvdata(pdev, dev);
/* Probe-time banner: part number, chip rev, PHY, bus and MAC address. */
11653 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11655 tp->board_part_number,
11656 tp->pci_chip_rev_id,
11657 tg3_phy_string(tp),
11658 tg3_bus_string(tp, str),
11659 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11661 for (i = 0; i < 6; i++)
11662 printk("%2.2x%c", dev->dev_addr[i],
11663 i == 5 ? '\n' : ':');
11665 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11666 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11669 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11670 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11671 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11672 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11673 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11674 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11675 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11676 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11677 dev->name, tp->dma_rwctrl,
11678 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11679 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
/* No link yet; the timer/interrupt path raises the carrier later. */
11681 netif_carrier_off(tp->dev);
/* Error unwind. NOTE(review): the err_out_iounmap and err_out_free_dev
 * labels (iounmap + free_netdev) are among the lines missing from this
 * extract; only the tail of the unwind chain is visible here.
 */
11695 pci_release_regions(pdev);
11697 err_out_disable_pdev:
11698 pci_disable_device(pdev);
11699 pci_set_drvdata(pdev, NULL);
/* tg3_remove_one() - PCI remove entry point; tears down one device.
 *
 * Flushes any pending reset work before unregistering, then releases
 * the PCI resources claimed in tg3_init_one() and clears drvdata.
 *
 * NOTE(review): lines are missing from this extract (e.g. the iounmap
 * of tp->regs and free_netdev(dev) between unregister_netdev and
 * pci_release_regions); comments cover only the visible calls.
 */
11703 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11705 struct net_device *dev = pci_get_drvdata(pdev);
11708 struct tg3 *tp = netdev_priv(dev);
/* Make sure tg3_reset_task() is not still queued before teardown. */
11710 flush_scheduled_work();
11711 unregister_netdev(dev);
11717 pci_release_regions(pdev);
11718 pci_disable_device(pdev);
11719 pci_set_drvdata(pdev, NULL);
/* tg3_suspend() - PCI power-management suspend hook.
 *
 * Stops the data path, disables interrupts, halts the chip and drops it
 * into the PCI power state chosen for @state. If the power transition
 * fails, the visible tail re-initializes the hardware and restarts the
 * interface so the device keeps working.
 *
 * NOTE(review): this extract is missing lines (the `int err;`
 * declaration, the early `return 0;` for a non-running device, and
 * several braces/error checks); comments describe only visible code.
 */
11723 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11725 struct net_device *dev = pci_get_drvdata(pdev);
11726 struct tg3 *tp = netdev_priv(dev);
/* Nothing to quiesce if the interface is down. */
11729 if (!netif_running(dev))
11732 flush_scheduled_work();
11733 tg3_netif_stop(tp);
11735 del_timer_sync(&tp->timer);
/* Disable chip interrupts under the full lock (irq-safe variant). */
11737 tg3_full_lock(tp, 1);
11738 tg3_disable_ints(tp);
11739 tg3_full_unlock(tp);
11741 netif_device_detach(dev);
/* Halt the chip; INIT_COMPLETE is cleared so a wakeup re-inits fully. */
11743 tg3_full_lock(tp, 0);
11744 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11745 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11746 tg3_full_unlock(tp);
/* Enter the target low-power state (D1/D2/D3 per pci_choose_state). */
11748 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
/* Failure path: bring the hardware and data path back up. */
11750 tg3_full_lock(tp, 0);
11752 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11753 if (tg3_restart_hw(tp, 1))
11756 tp->timer.expires = jiffies + tp->timer_offset;
11757 add_timer(&tp->timer);
11759 netif_device_attach(dev);
11760 tg3_netif_start(tp);
11763 tg3_full_unlock(tp);
/* tg3_resume() - PCI power-management resume hook.
 *
 * Restores PCI config space, returns the chip to full power (D0),
 * re-initializes the hardware and restarts the timer and data path.
 *
 * NOTE(review): lines are missing from this extract (the `int err;`
 * declaration, early `return 0;`, and error-check braces); comments
 * describe only the visible calls.
 */
11769 static int tg3_resume(struct pci_dev *pdev)
11771 struct net_device *dev = pci_get_drvdata(pdev);
11772 struct tg3 *tp = netdev_priv(dev);
/* Interface was down at suspend time: nothing to restart. */
11775 if (!netif_running(dev))
/* Restore the config-space snapshot saved in tg3_init_one()/suspend. */
11778 pci_restore_state(tp->pdev);
11780 err = tg3_set_power_state(tp, PCI_D0);
11784 netif_device_attach(dev);
/* Re-init the chip and resume the periodic timer and tx/rx paths. */
11786 tg3_full_lock(tp, 0);
11788 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11789 err = tg3_restart_hw(tp, 1);
11793 tp->timer.expires = jiffies + tp->timer_offset;
11794 add_timer(&tp->timer);
11796 tg3_netif_start(tp);
11799 tg3_full_unlock(tp);
/* PCI driver descriptor binding the tg3 entry points to the device ID
 * table (tg3_pci_tbl, defined elsewhere in this file).
 * NOTE(review): the closing `};` line is not visible in this extract.
 */
11804 static struct pci_driver tg3_driver = {
11805 .name = DRV_MODULE_NAME,
11806 .id_table = tg3_pci_tbl,
11807 .probe = tg3_init_one,
11808 .remove = __devexit_p(tg3_remove_one),
11809 .suspend = tg3_suspend,
11810 .resume = tg3_resume
/* Module load entry point: register the PCI driver with the core.
 * pci_module_init() was the pre-2.6.22 wrapper for pci_register_driver().
 */
11813 static int __init tg3_init(void)
11815 return pci_module_init(&tg3_driver);
/* Module unload entry point: unregister the PCI driver, which invokes
 * tg3_remove_one() for every bound device.
 */
11818 static void __exit tg3_cleanup(void)
11820 pci_unregister_driver(&tg3_driver);
/* Register the module's init/exit handlers with the kernel. */
11823 module_init(tg3_init);
11824 module_exit(tg3_cleanup);